diff --git a/_data/home-content.yml b/_data/home-content.yml index 18f73ad11..8b773f8e5 100644 --- a/_data/home-content.yml +++ b/_data/home-content.yml @@ -1,140 +1,272 @@ -- title: Getting Started + + + +- title: Getting started icon: images/home-icons/started.svg url: '' links: - - title: Introducing Codefresh - localurl: /docs/getting-started/csdp-introduction/ - - title: Quick start - localurl: /docs/getting-started/quick-start/ - - title: Concepts - localurl: /docs/getting-started/main-concepts/ - - title: Entity model - localurl: /docs/getting-started/entity-model/ - - title: Architecture - localurl: /docs/getting-started/architecture/ - - title: GitOps approach - localurl: /docs/getting-started/gitops/ - - title: Frequently asked questions - localurl: /docs/getting-started/faq/ + - title: Introduction to Codefresh + localurl: /docs/getting-started/intro-to-codefresh/ + - title: Codefresh for CI + localurl: /docs/getting-started/ci-codefresh/ + - title: Codefresh for CD + localurl: /docs/getting-started/cd-codefresh/ + - title: Codefresh for GitOps + localurl: /docs/getting-started/gitops-codefresh/ + - title: Concepts in Codefresh + localurl: /docs/getting-started/concepts/ -- title: Clients - icon: images/home-icons/client.svg + +- title: Quick starts + icon: images/home-icons/started.svg url: '' links: - - title: Codefresh CLI - localurl: /docs/clients/csdp-cli/ + - title: Create Codefresh account + localurl: /docs/quick-start/create-codefresh-account/ + - title: CI/CD quick starts + localurl: /docs/quick-start/ci-quick-start/ + - title: GitOps quick starts + localurl: /docs/quick-start/gitops-quickstart/gitops-quick-start/ -- title: Installation - icon: images/home-icons/runtimes.svg +- title: Pipeline integrations + icon: images/home-icons/cloud-integrations.png + links: + - title: Hosted GitOps + localurl: /docs/integrations/codefresh-hosted-gitops/ + - title: Git Providers + localurl: /docs/integrations/git-providers/ + - title: Kubernetes + localurl: /docs/integrations/kubernetes/ + - title: Amazon Web Services + localurl: /docs/integrations/amazon-web-services/ + - title: Microsoft Azure + localurl: /docs/integrations/microsoft-azure/ + - title: Google Cloud + localurl: /docs/integrations/google-cloud/ + - title: Docker Registries + localurl: /docs/integrations/docker-registries/ + - title: Secret Storage + localurl: /docs/integrations/secret-storage/ + - title: Helm + localurl: /docs/integrations/helm/ + - title: Argo CD + localurl: /docs/integrations/argocd/ + - title: Datadog + localurl: /docs/integrations/datadog/ + - title: Jenkins integration/migration + localurl: /docs/integrations/jenkins-integration/ + - title: Codefresh API + localurl: /docs/integrations/codefresh-api/ + + + +- title: GitOps integrations + icon: images/home-icons/integrations.svg url: '' links: - - title: Installation environments - localurl: /docs/runtime/installation-options/ - - title: Set up a hosted runtime environment - localurl: /docs/runtime/hosted-runtime/ - - title: Hybrid runtime requirements - localurl: /docs/runtime/requirements/ - - title: Install hybrid runtimes - localurl: /docs/runtime/installation - - title: Manage provisioned runtimes - localurl: /docs/runtime/monitor-manage-runtimes/ - - title: Monitor provisioned hybrid runtimes - localurl: /docs/runtime/monitoring-troubleshooting/ - - title: Add external clusters to runtimes - localurl: /docs/runtime/managed-cluster/ - - title: Add Git Sources to runtimes - localurl: /docs/runtime/git-sources/ + - title: Image enrichment with 
GitOps integrations + localurl: /docs/gitops-integrations/image-enrichment-overview/ + - title: Codefresh Classic for GitOps + localurl: /docs/gitops-integrations/ci-integrations/codefresh-classic/ + - title: GitHub Actions for GitOps + localurl: /docs/gitops-integrations/ci-integrations/github-actions/ + - title: Jenkins for GitOps + localurl: /docs/gitops-integrations/ci-integrations/jenkins/ + - title: Jira for GitOps + localurl: /docs/gitops-integrations/issue-tracking/jira/ + - title: Amazon ECR for GitOps + localurl: /docs/gitops-integrations/container-registries/amazon-ecr/ + - title: Docker Hub Registry for GitOps + localurl: /docs/gitops-integrations/container-registries/dockerhub/ + - title: GitHub Container Registry for GitOps + localurl: /docs/gitops-integrations/container-registries/github-cr/ + - title: JFrog Artifactory for GitOps + localurl: /docs/gitops-integrations/container-registries/jfrog/ + - title: Quay Registry for GitOps + localurl: /docs/gitops-integrations/container-registries/quay/ + + + +- title: Dashboards & insights + icon: images/home-icons/guides.png + url: '' + links: + - title: Home dashboard + localurl: /docs/dashboards/home-dashboard/ + - title: DORA metrics + localurl: /docs/dashboards/dora-metrics/ + + + + +- title: CI/CD guides + icon: images/home-icons/guides.png + url: '' + links: + - title: Building your app + localurl: /docs/ci-cd-guides/packaging-compilation/ + - title: Building Docker images + localurl: /docs/ci-cd-guides/building-docker-images/ + - title: Accessing a Docker registry from Kubernetes cluster + localurl: /docs/ci-cd-guides/access-docker-registry-from-kubernetes/ + - title: Working with Docker registries + localurl: /docs/ci-cd-guides/working-with-docker-registries/ + - title: Adding config maps to namespaces + localurl: /docs/ci-cd-guides/add-config-maps-to-your-namespaces/ + - title: Pull Requests and branches + localurl: /docs/ci-cd-guides/pull-request-branches/ + - title: Pipelines for microservices + localurl: /docs/ci-cd-guides/microservices/ + - title: Deploying to predefined environments + localurl: /docs/ci-cd-guides/environment-deployments/ + - title: Previewing dynamic environments + localurl: /docs/ci-cd-guides/preview-environments/ + - title: Helm best practices + localurl: /docs/ci-cd-guides/helm-best-practices/ + - title: Templating for Kubernetes + localurl: /docs/ci-cd-guides/kubernetes-templating/ + - title: Progressive Delivery + localurl: /docs/ci-cd-guides/progressive-delivery/ + + + +- title: Example catalog + icon: images/home-icons/tutorial.svg + url: '' + links: + - title: CI examples + localurl: /docs/example-catalog/ci-examples + - title: CD examples + localurl: /docs/example-catalog/cd-examples + - title: GitOps examples + localurl: /docs/example-catalog/gitops-examples + - title: Pipelines icon: images/home-icons/pipeline.svg url: '' links: - - title: Creation - localurl: /docs/pipelines/create-pipeline - - title: Configure artifact repository - localurl: /docs/pipelines/configure-artifact-repository/ - - title: Selectors for concurrency synchronization - localurl: /docs/pipelines/concurrency-limit/ - - title: Sharing file systems - localurl: /docs/pipelines/sharing-file-system/ - + - title: Introduction to Pipelines + localurl: /docs/pipelines/introduction-to-codefresh-pipelines/ + - title: Creating Pipelines + localurl: /docs/pipelines/pipelines/ + - title: Pipeline triggers + localurl: /docs/pipelines/triggers/ + - title: Monitoring pipelines + localurl: /docs/pipelines/monitoring-pipelines/ + - 
title: Shared Configuration + localurl: /docs/pipelines/shared-configuration/ + - title: Using secrets + localurl: /docs/pipelines/secrets-store/ + - title: Pipeline caching + localurl: /docs/pipelines/pipeline-caching/ + - title: Running pipelines locally + localurl: /docs/pipelines/running-pipelines-locally/ + - title: Debugging pipelines + localurl: /docs/pipelines/debugging-pipelines/ + + -- title: Deployment +- title: Deployments icon: images/home-icons/deployment.svg url: '' links: - - title: Creating applications - localurl: /docs/deployment/create-application/ + - title: Deployment options for Kubernetes + localurl: /docs/deployments/kubernetes/deployment-options-to-kubernetes/ + - title: Managing Kubernetes clusters + localurl: /docs/deployments/kubernetes/manage-kubernetes/ + - title: Using Helm in Codefresh pipelines + localurl: /docs/deployments/helm/using-helm-in-codefresh-pipeline/ + - title: Managing Helm releases + localurl: /docs/deployments/helm/helm-releases-management/ + - title: Promoting Helm environments + localurl: /docs/deployments/helm/helm-environment-promotion/ + - title: Creating GitOps applications + localurl: /docs/deployment/gitops/create-application/ - title: Monitoring applications - localurl: /docs/deployment/applications-dashboard/ + localurl: /docs/deployment/gitops/applications-dashboard/ - title: Managing applications - localurl: /docs/deployment/manage-application/ - - title: Images in Codefresh - localurl: /docs/deployment/images/ - - title: Install Argo Rollouts - localurl: /docs/deployment/install-argo-rollouts/ + localurl: /docs/deployment/gitops/manage-application/ -- title: Reports & Insights - icon: images/home-icons/guides.png + + +- title: Workflows + icon: images/home-icons/pipeline.svg url: '' links: - - title: Home dashboard - localurl: /docs/reporting/home-dashboard/ - - title: DORA metrics - localurl: /docs/reporting/dora-metrics/ - + - title: Creating workflows + localurl: /docs/workflows/create-pipeline + - title: Nested workflows + localurl: /docs/workflows/nested-workflows/ + - title: Configure artifact repository + localurl: /docs/workflows/configure-artifact-repository/ + - title: Selectors for concurrency synchronization + localurl: /docs/workflows/concurrency-limit/ + - title: Sharing file systems + localurl: /docs/workflows/sharing-file-system/ + -- title: Image enrichment - icon: images/home-icons/integrations.svg + + +- title: Clients + icon: images/home-icons/client.svg + url: '' + links: + - title: Codefresh CLI + localurl: /docs/clients/csdp-cli/ + + +- title: Installation + icon: images/home-icons/runtimes.svg url: '' links: - - title: Image enrichment with integrations - localurl: /docs/integrations/image-enrichment-overview/ - - title: Codefresh Classic - localurl: /docs/integrations/ci-integrations/codefresh-classic/ - - title: GitHub Actions - localurl: /docs/integrations/ci-integrations/github-actions/ - - title: Jenkins - localurl: /docs/integrations/ci-integrations/jenkins/ - - title: Jira - localurl: /docs/integrations/issue-tracking/jira/ - - title: Amazon ECR - localurl: /docs/integrations/container-registries/amazon-ecr/ - - title: Docker Hub Registry - localurl: /docs/integrations/container-registries/dockerhub/ - - title: GitHub Container Registry - localurl: /docs/integrations/container-registries/github-cr/ - - title: JFrog Artifactory - localurl: /docs/integrations/container-registries/jfrog/ - - title: Quay Registry - localurl: /docs/integrations/container-registries/quay/ + - title: Installation 
environments + localurl: /docs/installation/installation-options/ + - title: Codefresh Runner installation + localurl: /docs/installation/codefresh-runner/ + - title: On-Premises installation + localurl: /docs/installation/codefresh-on-prem/ + - title: On-Premises upgrade + localurl: /docs/installation/codefresh-on-prem-upgrade/ + - title: Hosted GitOps installation + localurl: /docs/installation/gitops/hosted-runtime/ + - title: Hybrid GitOps installation + localurl: /docs/installation/gitops/hybrid-gitops/ + - title: Administration icon: images/home-icons/administration.svg url: '' links: - - title: Manage users - localurl: /docs/administration/add-users/ + - title: Create a Codefresh account + localurl: /docs/administration/account-user-management/create-codefresh-account/ + - title: Adding users and teams + localurl: /docs/administration/account-user-management/add-users/ - title: Single Sign-On - localurl: /docs/administration/single-sign-on/ + localurl: /docs/single-sign-on/ - title: Set up OAuth2 authentication for Git providers - localurl: /docs/administration/oauth-setup/ - - title: User settings - localurl: /docs/administration/user-settings/ - - title: Access Control - localurl: /docs/administration/access-control/ + localurl: /docs/administration/account-user-management/oauth-setup/ + - title: Access control + localurl: /docs/administration/account-user-management/access-control/ - title: Audit - localurl: /docs/administration/audit/ + localurl: /docs/administration/account-user-management/audit/ - title: Codefresh IP addresses - localurl: /docs/administration/platform-ip-addresses/ + localurl: /docs/administration/account-user-management/platform-ip-addresses/ + - title: User settings + localurl: /docs/administration/user-self-management/user-settings/ + - title: Manage Git PATs + localurl: /docs/administration/user-self-management/manage-pats/ + - title: Reference icon: images/home-icons/guides.png url: '' links: + - title: Runner installation behind firewalls + url: /docs/behind-the-firewall/ - title: Git tokens localurl: /docs/reference/git-tokens/ - title: Secrets @@ -142,12 +274,7 @@ - title: Shared configuration repo localurl: /docs/reference/shared-configuration/ -- title: Troubleshooting - icon: images/home-icons/troubleshooting.svg - url: '' - links: - - title: Runtimes - localurl: /docs/troubleshooting/runtime-issues + diff --git a/_data/nav.yml b/_data/nav.yml index cfb2f70a6..7561d3818 100644 --- a/_data/nav.yml +++ b/_data/nav.yml @@ -1,112 +1,289 @@ + - title: Getting started url: "/getting-started" pages: - - title: Introducing Codefresh - url: "/csdp-introduction" - - title: Quick start - url: "/quick-start" - sub-pages: - - title: Provision a hosted runtime - url: "/install-hosted" - - title: Prepare for hybrid runtime installation - url: "/verify-requirements" - - title: Install a hybrid runtime - url: "/runtime" - - title: Create an application - url: "/create-app-ui" - - title: Create and commit resources for application - url: "/create-app-specs" - - title: Update the image tag for application - url: "/create-rollout" - - title: Trigger the Hello World example pipeline - url: "/hello-world" - - title: Create a basic CI delivery pipeline - url: "/create-ci-pipeline" - - - - title: Main concepts - url: "/main-concepts" - - title: Entity model - url: "/entity-model" - - title: Architecture - url: "/architecture" - - title: GitOps approach - url: "/gitops" - - title: Frequently asked questions - url: "/faq" - -- title: Clients - url: "/clients" - pages: - - 
title: Download CLI - url: "/csdp-cli" + - title: Introduction to Codefresh + url: "/intro-to-codefresh" + - title: Codefresh for CI + url: "/ci-codefresh" + - title: Codefresh for CD + url: "/cd-codefresh" + - title: Codefresh for GitOps + url: "/gitops-codefresh" + - title: Concepts in Codefresh + url: "/concepts" - -- title: Installation - url: "/runtime" +- title: Quick starts + url: "/quick-start" pages: - - title: Installation environments - url: "/installation-options" - - title: Set up a hosted runtime environment - url: "/hosted-runtime" - - title: Hybrid runtime requirements - url: "/requirements" - - title: Install hybrid runtimes - url: "/installation" - - title: Manage provisioned runtimes - url: "/monitor-manage-runtimes" - - title: Monitor provisioned hybrid runtimes - url: "/monitoring-troubleshooting" - - title: Add external clusters to runtimes - url: "/managed-cluster" - - title: Add Git Sources to runtimes - url: "/git-sources" + - title: Create Codefresh account + url: "/create-codefresh-account" + - title: CI/CD quick starts + url: "/ci-quickstart" + sub-pages: + - title: CI pipeline quick start + url: "/create-ci-pipeline" + - title: Kubernetes deployment quick start + url: "/deploy-to-kubernetes" + - title: Helm quick start + url: "/deploy-with-helm" + - title: On-demand environment quick start + url: "/on-demand-environments" + - title: GitOps quick starts + url: "/gitops-quickstart" + sub-pages: + - title: Provision a hosted runtime + url: "/install-hosted" + - title: Prepare for hybrid runtime installation + url: "/verify-requirements" + - title: Install a hybrid runtime + url: "/runtime" + - title: Create an application + url: "/create-app-ui" + - title: Create and commit resources for application + url: "/create-app-specs" + - title: Update the image tag for application + url: "/create-rollout" -- title: Pipelines - url: "/pipelines" - pages: - - title: Creation - url: "/create-pipeline" - - title: Configure artifact repository - url: "/configure-artifact-repository" - - title: Selectors for concurrency synchronization - url: "/concurrency-limit" - - title: Sharing file systems - url: "/sharing-file-system" - - title: Nested workflows - url: "/nested-workflows" - -- title: Deployment - url: "/deployment" - pages: - - title: Creating applications - url: "/create-application" - - title: Monitoring applications - url: "/applications-dashboard" - - title: Managing applications - url: "/manage-application" - - title: Images in Codefresh - url: "/images" - - title: Install Argo Rollouts - url: "/install-argo-rollouts" -- title: Reports & Insights - url: "/reporting" +- title: Dashboards & Insights + url: "/dashboards" pages: - title: Home dashboard url: "/home-dashboard" - title: DORA metrics url: "/dora-metrics" + + +- title: CI/CD Guides + url: "/ci-cd-guides" + pages: + - title: Building your app + url: "/packaging-compilation" + - title: Building Docker images + url: "/building-docker-images" + - title: Accessing Docker registries from Kubernetes cluster + url: "/access-docker-registry-from-kubernetes" + - title: Working with Docker registries + url: "/working-with-docker-registries" + - title: Adding config maps to namespaces + url: "/add-config-maps-to-your-namespaces" + - title: Pull Requests and branches + url: "/pull-request-branches" + - title: Building microservices + url: "/microservices" + - title: Deploying to predefined environments + url: "/environment-deployments" + - title: Previewing dynamic environments + url: "/preview-environments" + - title: 
Progressive delivery + url: "/progressive-delivery" + - title: GitOps deployments + url: "/gitops-deployments" + - title: Helm best practices + url: "/helm-best-practices" + - title: Kubernetes templating + url: "/kubernetes-templating" + + + + +- title: Example catalog + url: "/example-catalog" + pages: + - title: CI/CD examples + url: "/examples" + - title: CI examples + url: "/ci-examples" + sub-pages: + - title: Check out Git repositories + url: "/git-checkout" + - title: Custom Git commands + url: "/git-checkout-custom" + - title: Non-Git checkouts + url: "/non-git-checkout" + - title: Use Git Hash in CI + url: "/get-short-sha-id-and-use-it-in-a-ci-process" + - title: Build an Image with the Dockerfile in root directory + url: "/build-an-image-with-the-dockerfile-in-root-directory" + - title: Build an Images specifying Dockerfile Location + url: "/build-an-image-specify-dockerfile-location" + - title: Build an Image from a different Git repository + url: "/build-an-image-from-a-different-git-repository" + - title: Build and push an Image + url: "/build-and-push-an-image" + - title: Build an Image with build arguments + url: "/build-an-image-with-build-arguments" + - title: Share data between steps + url: "/shared-volumes-between-builds" + - title: Upload/download from Google Storage buckets + url: "/uploading-or-downloading-from-gs" + - title: Call other pipelines + url: "/call-child-pipelines" + - title: Run unit tests + url: "/run-unit-tests" + - title: Run integration tests + url: "/run-integration-tests" + - title: Fan-in/fan-out with unit tests + url: "/fan-in-fan-out" + - title: Codecov coverage reports + url: "/codecov-testing" + - title: Coveralls coverage reports + url: "/coveralls-testing" + - title: Codacy coverage reports + url: "/codacy-testing" + - title: Run integration tests with Mongo + url: "/integration-tests-with-mongo" + - title: Run integration tests with MySQL + url: "/integration-tests-with-mysql" + - title: Run integration tests with PostgreSQL + url: "/integration-tests-with-postgres" + - title: Run integration tests with Redis + url: "/integration-tests-with-redis" + - title: Populate a database with existing data + url: "/populate-a-database-with-existing-data" + - title: Share volumes in composition steps + url: "/shared-volumes-of-service-from-composition-step-for-other-yml-steps" + - title: Import data to MongoDB + url: "/import-data-to-mongodb" + - title: Vault Secrets in the Pipeline + url: "/vault-secrets-in-the-pipeline" + - title: Decrypt with Mozilla SOPS + url: "/decryption-with-mozilla-sops" + - title: GitOps secrets + url: "/gitops-secrets" + - title: Launch Composition + url: "/launch-composition" + - title: Use Docker compose + url: "/launching-a-composition-and-defining-a-service-environment-variables-using-a-file" + - title: Send notification to Slack + url: "/sending-the-notification-to-slack" + - title: Send notification to Jira + url: "/sending-the-notification-to-jira" + - title: CD examples + url: "/cd-examples" + sub-pages: + - title: Import data to MongoDB + url: "/import-data-to-mongodb" + - title: NodeJS + Angular2 + MongoDB + url: "/nodejs-angular2-mongodb" + - title: Secure a Docker Container Using HTTP Basic Auth + url: "/secure-a-docker-container-using-http-basic-auth" + - title: Spring Boot + Kafka + Zookeeper + url: "/spring-boot-kafka-zookeeper" + - title: Web terminal + url: "/web-terminal" + - title: Trigger a K8s Deployment from a DockerHub Push Event + url: "/trigger-a-k8s-deployment-from-docker-registry" + - title: 
Deploy to VM + url: "/packer-gcloud" + - title: Deploy to a VM via FTP + url: "/transferring-php-ftp" + - title: Deploy to Tomcat using SCP + url: "/deploy-to-tomcat-via-scp" + - title: Deploy with Helm + url: "/helm" + - title: Deploy with Terraform + url: "/terraform" + - title: Deploy with Pulumi + url: "/pulumi" + - title: Deploy to Nomad + url: "/nomad" + - title: Deploy to Heroku + url: "/deploy-to-heroku" + - title: Use kubectl as part of Freestyle step + url: "/use-kubectl-as-part-of-freestyle-step" + - title: Deploy with Kustomize + url: "/deploy-with-kustomize" + - title: Deploy to Docker Swarm + url: "/docker-swarm" + - title: Amazon ECS/Fargate + url: "/amazon-ecs" + - title: Elastic Beanstalk + url: "/elastic-beanstalk" + - title: GitOps examples + url: "/cd-examples" -- title: Image enrichment +- title: Pipeline integrations url: "/integrations" + pages: + - title: Codefresh Hosted GitOps + url: "/codefresh-hosted-gitops" + - title: Git Providers + url: "/git-providers" + - title: Kubernetes + url: "/kubernetes" + - title: Amazon Services + url: "/amazon-web-services" + - title: Microsoft Azure + url: "/microsoft-azure" + - title: Google Cloud + url: "/google-cloud" + - title: Docker registries + url: "/docker-registries" + sub-pages: + - title: Docker Hub + url: "/docker-hub" + - title: Azure Docker Registry + url: "/azure-docker-registry" + - title: Amazon EC2 Container Registry + url: "/amazon-ec2-container-registry" + - title: Google Container Registry + url: "/google-container-registry" + - title: Google Artifact Registry + url: "/google-artifact-registry" + - title: JFrog Bintray.io/Artifactory + url: "/bintray-io" + - title: Quay.io + url: "/quay-io" + - title: GitHub Container Registry + url: "/github-container-registry" + - title: DigitalOcean Container Registry + url: "/digital-ocean-container-registry" + - title: Other Registries + url: "/other-registries" + - title: Secret Storage + url: "/secret-storage" + - title: Hashicorp Vault + url: "/hashicorp-vault" + - title: Helm Integration + url: "/helm" + - title: ArgoCD Integration + url: "/argocd" + - title: Datadog Integration + url: "/datadog" + - title: Jira Integration + url: "/jira" + - title: Jenkins Integration + url: "/jenkins-integration" + - title: Codecov Integration + url: "/codecov-integration" + - title: Google Cloud builder + url: "/gcloud-builder" + - title: Google Marketplace Installation + url: "/google-marketplace" + - title: GitHub Actions + url: "/github-actions" + - title: Notifications + url: "/notifications" + sub-pages: + - title: Slack + url: "/slack-integration" + - title: Jira + url: "/jira-integration" + - title: Codefresh API + url: "/codefresh-api" + + +- title: GitOps integrations + url: "/gitops-integrations" pages: - - title: Image enrichment with integrations + - title: Image enrichment with GitOps integrations url: "/image-enrichment-overview" - - title: CI integrations + - title: GitOps CI integrations url: "/ci-integrations" sub-pages: - title: Codefresh Classic @@ -115,63 +292,279 @@ url: "/github-actions" - title: Jenkins url: "/jenkins" - - title: Issue tracking + - title: GitOps issue tracking integrations url: "/issue-tracking" sub-pages: - - title: Jira - url: "/jira" - - title: Container registries + - title: Jira + url: "/jira" + - title: GitOps container registry integrations url: "/container-registries" sub-pages: - - title: Amazon ECR - url: "/amazon-ecr" - - title: Docker Hub Registry - url: "/dockerhub" - - title: GitHub Container Registry - url: "/github-cr" - - 
title: JFrog Artifactory - url: "/jfrog" - - title: Quay Registry - url: "/quay" + - title: Amazon ECR + url: "/amazon-ecr" + - title: Docker Hub Registry + url: "/dockerhub" + - title: GitHub Container Registry + url: "/github-cr" + - title: JFrog Artifactory + url: "/jfrog" + - title: Quay Registry + url: "/quay" + +- title: Deployments + url: "/deployments" + pages: + - title: Kubernetes + url: "/kubernetes" + sub-pages: + - title: Deployment options for Kubernetes + url: "/deployment-options-to-kubernetes" + - title: Managing Kubernetes clusters + url: "/manage-kubernetes" + - title: Custom kubectl commands + url: "/custom-kubectl-commands" + - title: Helm + url: "/helm" + sub-pages: + - title: Using Helm in a Codefresh pipeline + url: "/using-helm-in-codefresh-pipeline" + - title: Managing Helm Releases + url: "/helm-releases-management" + - title: Using managed Helm repos + url: "/managed-helm-repository" + - title: Helm Charts and repositories + url: "/helm-charts-and-repositories" + - title: Custom Helm uploads + url: "/custom-helm-uploads" + - title: Promoting Helm environments + url: "/helm-environment-promotion" + - title: GitOps + url: "/gitops" + sub-pages: + - title: Creating GitOps applications + url: "/create-application" + - title: Monitoring GitOps applications + url: "/applications-dashboard" + - title: Managing GitOps applications + url: "/manage-application" + - title: Progressive delivery with GitOps + url: "/install-argo-rollouts" + - title: Images for GitOps + url: "/images" + +- title: Pipelines + url: "/pipelines" + pages: + - title: Introduction to Codefresh pipelines + url: "/introduction-to-codefresh-pipelines" + - title: Creating a CI pipeline + url: "/pipelines" + - title: Steps in pipelines + url: "/steps" + sub-pages: + - title: Git-clone + url: "/git-clone" + - title: Freestyle + url: "/freestyle" + - title: Build + url: "/build" + - title: Push + url: "/push" + - title: Composition + url: "/composition" + - title: Launch-composition + url: "/launch-composition" + - title: Deploy + url: "/deploy" + - title: Approval + url: "/approval" + - title: Conditional execution of steps + url: "/conditional-execution-of-steps" + - title: Post-step operations + url: "/post-step-operations" + - title: Triggers in pipelines + url: "/triggers" + sub-pages: + - title: Git triggers + url: "/git-triggers" + - title: DockerHub triggers + url: "/dockerhub-triggers" + - title: Azure triggers + url: "/azure-triggers" + - title: Quay triggers + url: "/quay-triggers" + - title: Helm triggers + url: "/helm-triggers" + - title: Artifactory triggers + url: "/jfrog-triggers" + - title: Timer (Cron) triggers + url: "/cron-triggers" + - title: Variables in pipelines + url: "/variables" + - title: Hooks in pipelines + url: "/hooks" + - title: Annotations in pipelines + url: "/annotations" + - title: Grouping steps into stages + url: "/stages" + - title: Caching for pipelines + url: "/pipeline-caching" + - title: Debugging CI pipelines + url: "/debugging-pipelines" + - title: Monitoring CI pipelines + url: "/monitoring-pipelines" + - title: Advanced workflows + url: "/advanced-workflows" + - title: Running pipelines locally + url: "/running-pipelines-locally" + - title: Configuration for pipelines + url: "/configuration" + sub-pages: + - title: Global pipeline settings + url: "/pipeline-settings" + - title: Shared configuration + url: "/shared-configuration" + - title: Secrets for pipelines + url: "/secrets-store" + - title: Public logs and status badges + url: "/build-status" + - title: 
Service containers + url: "/service-containers" + - title: Deployment environments + url: "/deployment-environments" + - title: Docker image metadata + url: "/docker-image-metadata" + - title: Pipeline definitions YAML + url: "/what-is-the-codefresh-yaml" + + +- title: Workflows + url: "/workflows" + pages: + - title: Creating workflows + url: "/create-pipeline" + - title: Nested workflows + url: "/nested-workflows" + - title: Configure artifact repository + url: "/configure-artifact-repository" + - title: Selectors for concurrency synchronization + url: "/concurrency-limit" + - title: Sharing file systems + url: "/sharing-file-system" + +- title: CI/CD testing + url: "/testing" + pages: + - title: Unit tests + url: "/unit-tests" + - title: Integration tests + url: "/integration-tests" + - title: Creating test reports + url: "/test-reports" + - title: Creating compositions + url: "/create-composition" + - title: Dynamic preview environments + url: "/automatic-preview-environments" + - title: Security scanning + url: "/security-scanning" + - title: SonarQube scanning + url: "/sonarqube-integration" + +- title: Clients + url: "/clients" + pages: + - title: Download CLI + url: "/csdp-cli" + + +- title: Installation + url: "/installation" + pages: + - title: Environments + url: "/installation-options" + - title: Runtime architectures + url: "/runtime-architecture" + - title: Codefresh Runner + url: "/codefresh-runner" + - title: On-Premises installation + url: "/codefresh-on-prem" + - title: On-Premises upgrade + url: "/codefresh-on-prem-upgrade" + - title: GitOps + url: + sub-pages: + - title: Hosted GitOps Runtime + url: "/hosted-runtime" + - title: Hybrid GitOps Runtime + url: "/hybrid-gitops" + - title: Monitoring & managing GitOps Runtimes + url: "/monitor-manage-runtimes" + - title: Add external clusters to GitOps Runtimes + url: "/managed-cluster" + - title: Add Git Sources to to GitOps Runtimes + url: "/git-sources" - title: Administration url: "/administration" pages: - - title: Manage users - url: "/add-users" - - title: User settings - url: "/user-settings" - - title: Set up OAuth2 authentication for Git providers - url: "/oauth-setup" - - title: Authorize access to organizations/projects - url: "/hosted-authorize-orgs" - - title: Single Sign-On - url: "/single-sign-on" - sub-pages: - - title: SAML2 - url: "/sso-setup-saml2" - - title: OpenID Connect - url: "/sso-setup-oauth2" + - title: Account & user management + sub-pages: + - title: Create a Codefresh account + url: "/create-codefresh-account" + - title: Adding users and teams + url: "/add-users" + - title: Configuring access control + url: "/access-control" + - title: Setting up OAuth2 for Git providers + url: "/oauth-setup" + - title: Authorize access to organizations/projects + url: "/hosted-authorize-orgs" + - title: Auditing actions in Codefresh + url: "/audit" + - title: Codefresh IP addresses + url: "/platform-ip-addresses" + - title: User self-management + sub-pages: + - title: Managing personal settings + url: "/user-settings" + - title: Managing Git PATs + url: "/manage-pats" + +- title: Single Sign-On + url: /single-sign-on + pages: + - title: Common configuration + url: /team-sync + - title: OpenID Connect + url: /oidc + sub-pages: - title: Auth0 - url: "/sso-auth0" + url: /oidc-auth0 - title: Azure - url: "/sso-azure" + url: /oidc-azure - title: Google - url: "/sso-google" - - title: LDAP - url: "/sso-ldap" + url: /oidc-google + - title: Okta + url: /oidc-okta + - title: OneLogin + url: /oidc-onelogin + - title: 
SAML + url: /saml-setup + sub-pages: + - title: JumpCloud + url: /saml-jumpcloud - title: Okta - url: "/sso-okta" + url: /saml-okta - title: OneLogin - url: "/sso-onelogin" - - title: Access Control - url: "/access-control" - - title: Audit - url: "/audit" - - title: Codefresh IP addresses - url: "/platform-ip-addresses" + url: /saml-onelogin + - title: PingID SSO + url: "/saml-pingid" + - title: LDAP + url: /ldap + + - title: Reference url: "/reference" @@ -183,18 +576,59 @@ - title: Shared configuration repo url: "/shared-configuration" -- title: What's New? - url: "/whats-new" - pages: - - title: What's new in Codefresh? - url: "/whats-new" - - title: Troubleshooting url: "/troubleshooting" pages: - - title: Runtimes + - title: Common Issues + url: "/common-issues" + sub-pages: + - title: Can't find your organization repositories + url: "/cant-find-your-organization-repositories" + - title: Can’t find your private repositories + url: "/cant-find-your-private-repositories" + - title: 'Clone step failed: Command [git checkout $REVISION] exited with code [1]' + url: "/git-clone-step-issue" + - title: Handling commit messages with a quote character + url: "/handling-commit-messages-with-quotes" + - title: Docker image does not exist or no pull access + url: "/the-docker-image-does-not-exist-or-no-pull-access" + - title: 'Build step: No such file or directory' + url: "/build-step-no-such-file-or-directory" + - title: No Dockerfile found + url: "/no-dockerfile-found" + - title: Failed to tag image + url: "/could-not-tag-image" + - title: Error Code 137 + url: "/error-code-137" + - title: Too many requests + url: "/dockerhub-rate-limit" + - title: Restoring data from pre-existing image hangs on + url: "/restoring-data-from-pre-existing-image-hangs-on" + - title: Disabling codefresh caching mechanisms + url: "/disabling-codefresh-caching-mechanisms" + - title: Pinning codefresh.yml for multi-git triggers + url: "/multi-git-triggers" + - title: Workflow terminated by the system + url: "/workflow-terminated-by-system" + - title: cf_export limitations + url: "/cf-export-limitations" + - title: Validation port warnings + url: "/validation-port-warnings" + - title: Forbidden Kubernetes resources + url: "/forbidden-cluster-resources" + - title: How to use SSH keys in freestyle steps + url: "using-ssh-keys" + - title: Failed to get accounts clusters during workflow + url: "/failed-to-get-accounts-clusters-during-workflow" + - title: Paging issues for builds and images + url: "/paging-issues-builds-images" + - title: Git step migration + url: "/git-step-migration" + - title: Personal Git Deprecation + url: "/personal-git-deprecation" + - title: GitOPs runtimes url: "/runtime-issues" - + - title: Terms and Privacy Policy url: "/terms-and-privacy-policy" @@ -205,3 +639,5 @@ url: "/privacy-policy" - title: Service Commitment url: "/sla" + + diff --git a/_docs/administration/access-control.md b/_docs/administration/access-control.md deleted file mode 100644 index 3c55b8cc4..000000000 --- a/_docs/administration/access-control.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: "Access control" -description: "" -group: administration -toc: true - ---- -Access control defines the access policy for resources within an enterprise. -In Codefresh, access control to an entity is derived from the entity type, which can be categorized into one of the following: - -* **GitOps-controlled entities** - Entities whose entire lifecyle - creation, update, and deletion, are fully GitOps-controlled. 
- Examples of such entities in Codefresh include: - * Runtimes - * Git Sources - * Pipelines comprising Argo Workflow/Events resources such as the Workflow Template, Sensor, Event Sources - * Applications comprising Argo CD/Rollouts resources project, Application Set, applications, rollout - -* **Non-GitOps-controlled entities** - - Entities reported to Codefresh as built artifacts, not GitOps-controlled. - - Examples of such entities in Codefresh include: - * Images - -* **Account-configuration entities (currently non-GitOps-controlled)** - - Entities whose state is not currently stored in a Git repository. - Examples of such entities in Codefresh include: - - * Account configuration collaborators - * Account configuration security - * Account configuration Single Sign-On (SSO) - * Billing - - -### GitOps-controlled entities -Codefresh stores the state of your account entities according to GitOps principles and policies. - -#### Write permissions -Users with write permissions can access and manage files directly in the Git repository. Any action on the file such as create, update, or delete, is immediately reflected in the user account. - -Any user action via a Codefresh client (UI or CLI), on a GitOps-controlled resource, is impersonated with the user's Git permissions. If the user does not have permissions for an action in Git, then the user is automatically denied access to the same action in a Codefresh client. - -For Codefresh to impersonate the user, the user must provide Git credentials for every runtime. The credentials are securely stored by the Codefresh application proxy. -The Codefresh application proxy uses these credentials: -* For Git-provider operations -* To update Codefresh with the read/write permissions to all existing repositories linked to the Git Source defined for a runtime. The Codefresh client can perform client-side validations. - -To add your Git personal token, in the Codefresh UI, go to your avatar and then select [user settings](https://g.codefresh.io/2.0/user-settings). - -{% include -image.html -lightbox="true" -file="/images/administration/access-control/pat.png" -url="/images/administration/access-control/pat.png" -alt="Add personal access token" -caption="Add personal access token" -max-width="30%" -%} - -#### Read permissions -Codefresh enforces read permissions by checking if the user has Git permissions to view the Kubernetes manifest in the repository. -Read permissions to entities created dynamically from changes in resource state, are inherited from the parent entity's permissions. - -From the user's perspective, this means that: - -* If the user does not have read permissions from the Git provider for the Sensor's Kubernetes manifest, the user does not have visibility into pipelines. - Workflow entities that are dynamically created, derive their read permissions from pipeline permissions. - -* Notifications are displayed only for resources with read permissions. - - -> Currently, we do not enforce Analytics views according to read permissions for pipelines. - -#### Write operations on dynamically-created entities -These are operations users can perform on dynamically-created entities, such as workflows for example. Typically, the permissions for such entities are derived from those of the parent entity. - -Currently, all users with view permissions, can also terminate and retry workflows. - - -### Non-GitOps-controlled entities -For now, users can view all `image` entity types. 
These are resources reported to Codefresh as built artifacts, but not stored using the GitOps approach. - -### Account-configuration for non-GitOps-controlled entities -All account-configuration entities you have access to are listed in your account settings, and are exposed only to account admins. - -When adding a user account, you can assign the `admin` role to the user. The `admin` role automatically enables all account-configurations. - -### Runtime account-configuration -Runtime configuration is also exposed in the account settings dedicated area and only exposed to admins but is fully controlled via the GitOps approach after installation.
- -Users with write permissions to the runtime installation repository in Git can make changes to the runtime, and create, update, or delete Git Sources defined for that runtime. -We are at present exposing the runtime configuration under the account settings only to account admins. -Be aware though that these can also be changed directly through Git by users who are not admin users in Codefresh.
- -For now, Codefresh admin users can see all runtimes and Git Sources even if they don't have read permissions to the underlying Git repository. - - -### Upcoming enhancements to access control -We are working to enhance our access control model by adding another layer to provide the ability to define: -* Permissions on write operations for entities that are non-GitOps controlled, such as account configuration and workflow operations -* Read permissions for entities that are completely non-GitOps controlled -* A more granular permission model for entities that are GitOps-controlled, but without sufficient access control policies in place -* A more granular permission model for dynamic resources that are non-GitOps controlled, but created from a GitOps-controlled entity, for example, workflows - -### What to read next -[Codefresh architecture](({{site.baseurl}}/docs/getting-started/architecture/)) diff --git a/_docs/administration/account-user-management/access-control.md b/_docs/administration/account-user-management/access-control.md new file mode 100644 index 000000000..3d55748c1 --- /dev/null +++ b/_docs/administration/account-user-management/access-control.md @@ -0,0 +1,248 @@ +--- +title: "Configuring access control" +description: "Restrict resources in a company environment" +group: administration +sub_group: account-user-management +redirect_from: + - /docs/enterprise/access-control/ + - /docs/enterprise-account-mng/ent-account-mng/ + - /docs/enterprise/ent-account-mng/ + - /docs/administration/ent-account-mng/ +toc: true + +--- + +Codefresh provides several complementary ways for access control within an organization: + +* **Role-based access**: [Role-based access](#users-and-administrators), restricts access to parts of the Codefresh UI intended for account administrators. For example, only an account administrator should be able to change integrations with [git providers]({{site.baseurl}}/docs/integrations/git-providers/) and [cloud services]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/). + +* **Attribute-based access control (ABAC)**: Policy-based access control via attributes (ABAC), restricts access to [Kubernetes clusters and pipelines](#access-to-kubernetes-clusters-and-pipelines). This option allows account administrators to define exactly which teams have access to which clusters and pipelines. For example, access to production clusters can be granted only to a subset of trusted developers/operators. On the other hand, access to a QA/staging cluster can be less strict. + +* **Git-repository access**: Restrict the Git repositories used to load [pipeline definitions](#pipeline-definition-restrictions). + + +## Role-based access for users and administrators + +Role-based access, as either a user or an administrator, is usually defined when you [add users to Codefresh accounts]({{site.baseurl}}/docs/administration/add-users/#users-in-codefresh). + +> To add users and assign or change user roles, you must be an administrator yourself. + + +{% include + image.html + lightbox="true" + file="/images/administration/users/invite-users.png" + url="/images/administration/users/invite-users.png" + alt="User roles for access control" + caption="User roles for access control" + max-width="90%" +%} + +The table below lists the functionality available for role-based access. 
+ +{: .table .table-bordered .table-hover} +| Functionality | Available for Role | +| -------------- | -------------- | +|Run pipelines | `User` and `Admin`| +|View Docker images | `User` and `Admin`| +|Inspect text reports | `User` and `Admin`| +|[Git Integrations]({{site.baseurl}}/docs/integrations/git-providers/) | `Admin`| +|[External docker registry settings]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) | `Admin`| +|[External Helm repositories]({{site.baseurl}}/docs/new-helm/add-helm-repository/) | `Admin`| +|[Cloud provider settings]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/) | `Admin`| +|[Cloud storage settings]({{site.baseurl}}/docs/testing/test-reports/#connecting-your-storage-account) | `Admin`| +|[Shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) | `Admin`| +|[API token generation]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions) | `Admin`| +|[SSO Settings]({{site.baseurl}}/docs/administration/single-sign-on/) | `Admin`| +|[Runtime environment selection]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-settings) | `Admin`| +|[Slack settings]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) | `Admin`| +|[Audit logs]({{site.baseurl}}/docs/administration/audit-logs/) | `Admin`| +|ABAC for Kubernetes clusters | `Admin`| +|Billing and charging | `Admin`| + + + +## ABAC access control for Kubernetes clusters and pipelines + +ABAC (Attribute-Based Access Control), allows fine-grained access to Kubernetes clusters and pipelines. See ([ABAC](https://en.wikipedia.org/wiki/Attribute-based_access_control){:target="\_blank"}. + +ABAC access control includes: + +1. Assigning custom attributes to your Kubernetes clusters +1. Assiging custom attributes to your pipelines +1. Defining rules as policies using teams, clusters, and attributes (who, what, where) + + + +### Add Kubernetes clusters with policy attributes + +After adding Kubernetes clusters, you can configure clusters with multiple tags. + +Tag names are arbitrary, and can be anything you choose that matches your company process. You can tag your clusters with product names, software lifecycle phases, department names, or name that helps your security policies. + +You can assign multiple tags to each cluster, making it easy to define multiple policies on the same cluster. For example, per project and per team. + +{% include image.html + lightbox="true" + file="/images/administration/access-control/kubernetes-abac.png" + url="/images/administration/access-control/kubernetes-abac.png" + alt="Cluster tags" + caption="Cluster tags" + max-width="70%" + %} + +**Before you begin** +* If needed, [add a Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/) + +**How to** + +1. Expand the provider under which you added the cluster. +1. Mouse over the cluster to which to add tags or attributes, and then click **Edit tags** on the right. + The Tags page displays existing tags if any, and allows you to add multiple tags for a single cluster. + + +{% include image.html + lightbox="true" + file="/images/administration/access-control/tagging-kubernetes-clusters.png" + url="/images/administration/access-control/tagging-kubernetes-clusters.png" + alt="Assigning tags to a cluster" + caption="Assigning tags to a cluster" + max-width="60%" + %} +1. Click **Add** and type in the tag. +1. Continue to add tags and when finished, click **Save**. 
+ +>By default, all clusters, with and without tags, are displayed and can be edited by all users (but not deleted). As soon as you add at least one tag to a cluster, the cluster is only accessible to users with the required policy rules (explained in the next sections). + +### Configure pipelines with policy attributes + +Similar to Kubernetes clusters, you can also add tags to specific pipelines. + +**Before you begin** +* If needed, [create a CI pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) + +**How to** + +1. In the Codefresh UI, go to [Pipelines](https://g.codefresh.io/pipelines/all/){:target="\_blank"}. +1. In the row with the target pipline, click the context menu for the pipeline, and then select **Edit tags**. +1. Type in the new tag, press Enter, and continue to add the tags you need. +1. When finished, click **Save**. + + +{% include image.html + lightbox="true" + file="/images/administration/access-control/pipeline-tags.png" + url="/images/administration/access-control/pipeline-tags.png" + alt="Assigning attributes to a pipeline" + caption="Assigning attributes to a pipeline" + max-width="80%" + %} + + +### Define rules for access control +Define security rules using the *who, what, where* pattern to control access to clusters and pipelines by departments, projects, roles etc. + +For each rule you define, select: +1. The team the rule applies to +1. Cluster privileges (*Create/delete/read/update*) or pipeline privileges (*Create/delete/read/run/update*) +1. Effective tags + + +**Before you begin** +* Make sure you have [created at least one team]({{site.baseurl}}/docs/administration/add-users/#create-a-team-in-codefresh) + +**How to** +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. On the sidebar, from Access & Collaboration, select [**Permissions**](https://g.codefresh.io/account-admin/permissions/teams){:target="\_blank"}. +1. For each entity, do the following to define a rule: + 1. Select the team to which assign the rule. + 1. Select the permissions to assign to the team for that entity. + 1. Select either all clusters with tags (**All tags**) or all clusters that are untagged (**Without tags**). + + {% include image.html + lightbox="true" + file="/images/administration/access-control/kubernetes-policies.png" + url="/images/administration/access-control/kubernetes-policies.png" + alt="Kubernetes policies" + caption="Kubernetes policies" + max-width="80%" + %} + +### Description of privileges + +**For clusters:** + +* `Create`: cluster creation requires someone to be account administrator anyway so currently this permission isn’t really necessary . +* `Read` - can only see existing allowed clusters without any ability to change them. +* `Update` - can see and edit existing allowed cluster resources (which means also perform [installation, removal and rollbacks of Helm charts]({{site.baseurl}}/docs/new-helm/helm-best-practices/)). Tags are managed from account settings, so this permission doesn’t apply to it currently. +* `Delete` - cluster removal requires someone to be account administrator anyway so currently this permission isn’t really necessary. + +For pipelines: + +* `Create` - can only create new pipelines, not see, edit (which includes tagging them) or delete them. This permission should also go hand in hand with additional permissions like read/edit untagged pipelines. +* `Read` - view allowed pipelines only. +* `Update` - see and edit allowed pipelines only (including tagging them). 
+* `Delete` - can delete allowed pipelines only.
+* `Run` - can run allowed pipelines only.
+* `Approve` - resume pipelines that are waiting for manual [approval]({{site.baseurl}}/docs/codefresh-yaml/steps/approval/).
+* `Debug` - allow the usage of the [pipeline debugger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/debugging-pipelines/).
+
+
+## Git-repository access restrictions
+
+By default, users can load pipeline definitions when [creating a pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/), from the inline editor, or from any private or public Git repository.
+
+You can change the default behavior to restrict loading CI pipeline definitions from specific Git repositories, or completely disable loading definitions from all Git repositories.
+
+### Enable/disable access to pipeline YAMLs by source
+Enable or disable access to pipeline definition YAMLs based on the source of the YAML. These global settings apply to all pipelines in the account, and enable or disable that method of pipeline creation from the Codefresh UI.
+You can enable or disable loading pipeline definitions from:
+ * The inline editor in the Codefresh UI: Disabling the inline editor, for example, disables new and _all existing pipelines_ with pipeline definitions defined in the Codefresh editor. The Run button is disabled for all such pipelines.
+ * Any Git repository connected to Codefresh
+ * **Any** public URL
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. From Configuration on the sidebar, select [**Pipeline Settings**](https://g.codefresh.io/account-admin/account-conf/pipeline-settings){:target="\_blank"}.
+
+  {% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/pipeline-restrictions.png"
+  url="/images/administration/access-control/pipeline-restrictions.png"
+  alt="Global pipeline restrictions"
+  caption="Global pipeline restrictions"
+  max-width="80%"
+  %}
+
+1. Turn the options on or off as needed.
+1. Continue with defining access to Git repositories for pipeline YAMLs, as described below.
+
+### Define access to Git repositories for pipeline YAMLs
+If access to pipeline definitions is enabled for Git repositories, you can configure fine-grained restrictions through the integration settings for your [Git provider]({{site.baseurl}}/docs/integrations/git-providers/).
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. From Configuration on the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select the Git provider integration, and then click **Edit**.
+1. Scroll down and expand **YAML Options**.
+
+  {% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/pipeline-git-restrictions.png"
+  url="/images/administration/access-control/pipeline-git-restrictions.png"
+  alt="Pipeline restrictions per Git provider"
+  caption="Pipeline restrictions per Git provider"
+  max-width="80%"
+  %}
+
+{:start="5"}
+1. Configure restrictions for Git repositories that can be used for pipeline definitions:
+ * **Allow only the following repositories**: Toggle **Manual selection** to on, and then select the Git repos, or define a regex according to which to select repos.
+ * **Allow only the following branches**: Select Git repositories by the branches that match the regex. For example, the regex `/^((pipeline-definition)$).*/g` allows users to load pipeline YAMLs only from a branch named `pipeline-definition` in a Git repository.
+ * **Allow only the following paths**: Select Git repositories by folders within the repo that match the glob pattern). + + + +## Related articles +[Codefresh installation options]({{site.baseurl}}/docs/administration/installation-security/) +[Managing your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) diff --git a/_docs/administration/account-user-management/add-users.md b/_docs/administration/account-user-management/add-users.md new file mode 100644 index 000000000..6177e2c3c --- /dev/null +++ b/_docs/administration/account-user-management/add-users.md @@ -0,0 +1,115 @@ +--- +title: "Adding users and teams" +description: "Add users and teams to Codefresh accounts" +group: administration +sub_group: account-user-management +toc: true +--- + +Once you have created a Codefresh account, you can add any number of users to collaborate on repositories, workflows, and pipelines, and teams of users. +You can then create teams in Codefresh to group users who share a common denominator, such as the same permissions, access to the same functionality, or roles. Teams make it easy for administrators to both define and manage items shared by multiple users in an orgranization. + + +## Users in Codefresh +Adding a user requires assigning a role to define access to account resources, and optionally, selecting an SSO provider for the user: + +* **Role**: Defines the user's access level to the resources in the account. + * **User**: The default. With this role, users can work with your repositories and pipelines, but cannot change settings +on clusters, docker registries, git integrations, shared configurations etc. + * **Administrator**: User with this role have full access to your account and can change all your settings, so make sure that they are trusted colleagues. + For guidelines on access control, see [Access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/). +* **SSO**: By default, SSO is not enabled for users. If required, explicitly select the SSO provider. For an overview of SSO, see [Single Sign on]({{site.baseurl}}/docs/single-sign-on/). + + +### Add a user to a Codefresh account +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. On the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}. +1. Select **Users**, and then select **+ [Add User]**. +1. Type the **User's email address**, and click **Invite**. + + The user receives an email invitation, and in the Users list, the username is set to Pending, and status to Resend. +1. From the **Role** dropdown, select either **User** or **Administrator**. +1. If SSO is configured for the account, **Select SSO provider**. + + + +### Manage users in a Codefresh account + +Once you add a user to your Codefresh account, you can do the following to manage that user: +* Resend invitations that are pending acceptance: Select ![](/images/administration/users/icon-Send.png?display=inline-block) **Resend**. +* Edit the user's email address: Select ![](/images/administration/users/icon-Edit.png?display=inline-block) **Edit**. +* Change the role: From the **Role** dropdown, select the new role. +* Change SSO provider: From the **SSO** dropdown, select the new SSO provider. +* Remove the user account: Select ![](/images/administration/users/icon-Delete.png?display=inline-block) **Delete**. 
+
+
+
+## Teams in Codefresh
+Teams are groups of users who share the same permissions or roles, or any other attribute required and defined by your company processes. Teams allow you to enforce access control through ABAC (Attribute-Based Access Control).
+By default, there are two teams:
+* Users
+* Admins, with users [invited as collaborators]({{site.baseurl}}/docs/accounts/assign-a-user-to-a-team/)
+
+> Only Enterprise customers can add new teams. Other Codefresh plans can only use the predefined *Users* and *Admin* teams. [Contact us](https://codefresh.io/contact-us/){:target="\_blank"} to upgrade to an Enterprise plan.
+
+### Create a team in Codefresh
+
+Create a team in Codefresh and then assign users to the team. You can assign the same user to multiple teams, as users in most companies have overlapping roles.
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **User Management**.
+1. On the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}.
+1. Select **Teams**, and then select **Create a Team**.
+1. Enter the **Team Name**.
+   > The team name can include only lower-case alphanumeric characters and hyphens, without spaces.
+
+   See the screenshot below for some sample team names.
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/teams.png"
+  url="/images/administration/access-control/teams.png"
+  alt="Examples of teams in Codefresh"
+  caption="Examples of teams in Codefresh"
+  max-width="80%"
+  %}
+
+### Assign a user to a team
+1. To assign users to the team, do the following:
+   1. Hover over the team name and click the **Settings** icon.
+   1. Click **Invite to team**, type the email address of the user to invite, and then click **Add**.
+1. To change the name of the team, click **Edit** and type the new name.
+
+## Define session timeouts and domain restrictions for user accounts
+As an administrator, you can optionally define session timeouts to automatically log out users who have been inactive for the specified duration, and restrict invitations to specific email domains.
+
+> The maximum duration for inactivity is 30 days. Inactive users are warned 15 minutes before they are logged out.
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. On the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}.
+1. Select **Security**.
+1. For **User Session**, add the timeout duration in minutes/hours/days.
+
+   {% include image.html
+   lightbox="true"
+   file="/images/administration/access-control/security-timeout.png"
+   url="/images/administration/access-control/security-timeout.png"
+   alt="Security timeout"
+   caption="Security timeout"
+   max-width="90%"
+   %}
+
+{:start="5"}
+1. To restrict invitations to specific email domains, in the **Email domains** field below User Invitations, type the domains to allow, one per line.
+
+## Troubleshoot add users
+
+* [User is prompted to enter an organization name](https://support.codefresh.io/hc/en-us/articles/360020177959-User-is-prompted-to-enter-an-organization-name)
+* [Account invitation not permitting login](https://support.codefresh.io/hc/en-us/articles/360015251000-Account-invitation-not-permitting-login)
+
+
+
+## Related articles
+[Access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/)
+[Single Sign on]({{site.baseurl}}/docs/single-sign-on/)
+[OAuth authentication for Git providers]({{site.baseurl}}/docs/administration/account-user-management/oauth-setup)
+
diff --git a/_docs/administration/account-user-management/audit.md b/_docs/administration/account-user-management/audit.md
new file mode 100644
index 000000000..0fa2d9d9f
--- /dev/null
+++ b/_docs/administration/account-user-management/audit.md
@@ -0,0 +1,111 @@
+---
+title: "Audit logs"
+description: "Get a list of all actions in Codefresh"
+group: administration
+sub_group: account-user-management
+redirect_from:
+  - /docs/enterprise/audit-logs/
+toc: true
+---
+
+Codefresh keeps a log of all actions, at all times, based on the API calls that reach Codefresh.
+The time frames covered by audit logs depend on the pricing tier of your Codefresh account.
+
+The audit log includes:
+* UI actions from users
+* [CLI](https://codefresh-io.github.io/cli/) invocations
+* Any [external integrations]({{site.baseurl}}/docs/integrations/codefresh-api/) used with Codefresh
+
+You can:
+* View, filter, and search for audited events
+* View the API payload for an event
+* Download the audit log file in CSV format
+
+## View audit logs
+The Audit Log is divided into actions audited (All Audit), and triggers and webhooks processed by Codefresh (Triggers).
+
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. On the sidebar, from Access & Collaboration, select [**Audit**](https://g.codefresh.io/account-admin/audit/audit-all){:target="\_blank"}.
+1. To focus on a specific time frame, select the date range from the toolbar.
+
+   The All Audit tab includes all Codefresh events in your account, sorted by the most recent events.
+   Each event shows the following details:
+    * `Entity ID/Name`: The entity that was affected.
+    * `Entity type`: The type of entity on which the action was performed, such as user, team, build, pipeline, project, etc.
+    * `Action`: The action that was taken on the entity.
+    * `Status`: The result of the API call.
+    * `User`: The name of the user who performed the action.
+    * `Last Request`: The time of the event.
+
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/audit-logs.png"
+url="/images/administration/audit/audit-logs.png"
+alt="Audit Logs view"
+caption="Audit Logs view"
+max-width="70%"
+%}
+
+
+The Triggers tab includes all the triggers/webhooks that were processed by Codefresh, with the same information as the All Audit tab.
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/audit-triggers.png"
+url="/images/administration/audit/audit-triggers.png"
+alt="Audit Triggers view"
+caption="Audit Triggers view"
+max-width="70%"
+%}
+
+
+Both tabs have built-in paging and filtering.
+
+
+
+### Filter audited events
+
+Filter audited events to focus on a specific entity or user.
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/audit-filter.png"
+url="/images/administration/audit/audit-filter.png"
+alt="Filtering audit actions"
+caption="Filtering audit actions"
+max-width="40%"
+%}
+
+
+### Get more details for audited events
+
+You can get the exact API payload for each event as it was sent to Codefresh, including the URL and other call parameters used for the selected event.
+
+* At the right of the row with the event, click the **More Details** (book) icon.
+
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/api-call-details.png"
+url="/images/administration/audit/api-call-details.png"
+alt="API call details for audited event"
+caption="API call details for audited event"
+max-width="40%"
+%}
+
+
+
+## Export audit logs
+
+Export all audited events, both Audits and Triggers, to a `CSV` file, for offline processing with your own tools or for viewing in external applications such as Microsoft Excel.
+
+* On the top right of the toolbar, click **Download Audit**.
+
+  The downloaded file includes, in addition to the events themselves, the API call information (payload and parameters) for each event.
+
+
+
+## Related articles
+[Codefresh installation options]({{site.baseurl}}/docs/installation/installation-security/)
+[Configuring access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/)
+[Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/)
diff --git a/_docs/administration/account-user-management/create-codefresh-account.md b/_docs/administration/account-user-management/create-codefresh-account.md
new file mode 100644
index 000000000..158246684
--- /dev/null
+++ b/_docs/administration/account-user-management/create-codefresh-account.md
@@ -0,0 +1,219 @@
+---
+title: "Create a Codefresh account"
+description: "Welcome to Codefresh!"
+group: administration
+sub_group: account-user-management
+redirect_from:
+  - /docs/
+  - /docs/create-an-account/
+  - /docs/getting-started/
+  - /docs/getting-started/introduction/
+---
+Before you can do anything in Codefresh, such as building and deploying your applications, you need to create a Codefresh account.
+
+Creating an account in Codefresh is free (no credit card is required) and can be done in three simple steps.
+
+{% include
+image.html
+lightbox="true"
+file="/images/administration/create-account/create-account-steps.png"
+url="/images/administration/create-account/create-account-steps.png"
+alt="Codefresh account creation steps"
+max-width="90%"
+%}
+
+## Step 1: Select your Identity Provider
+As the first step in setting up your account in Codefresh, select the identity provider (IdP) to use.
+Codefresh currently supports the following IdPs:
+* GitHub
+* Bitbucket
+* GitLab
+* Azure
+* Google
+* LDAP
+
+If you need an IdP that is not in the list, please [contact us](https://codefresh.io/contact-us/) with the details.
+
+>NOTES:
+  For Git repositories, the login method is less important, as you can connect Git repositories through [Git integrations]({{site.baseurl}}/docs/integrations/git-providers/), regardless of your sign-up process.
+
+  If you use multiple sign-up methods, as long as you use the same email address in all of them, Codefresh automatically redirects you to the account dashboard.
+
+1. Go to the [Codefresh Sign Up page](https://g.codefresh.io/signup).
+ + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/select-identity-provider.png" +url="/images/administration/create-account/select-identity-provider.png" +alt="Codefresh sign-up page" +caption="Codefresh sign-up page" +max-width="40%" +%} + +{:start="2"} +1. Select the IdP for sign-up. +1. Continue with [Step 2: Accept the permissions request](#step2-accept-the-permissions-request) + + + +## Step 2: Accept the permissions request + +After you select the IdP (identity provider), Codefresh requests permission to access your basic details, and for Git providers, to access your Git repositories. The Permissions window that is displayed differs according to the IdP selected in the previous step. + +Don't worry, Codefresh will not do anything without your explicit approval, so don't be scared by the permissions shown +in the request window. The permissions requested by Codefresh are needed in order to build and deploy your projects. + +1. Do any of the following: + * For GitHub: To continue, click **Authorize codefresh-io**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/github-authorize.png" +url="/images/administration/create-account/github-authorize.png" +alt="GitHub authorization page" +caption="GitHub authorization page" +max-width="50%" +%} + + * For Bitbucket: To continue, click **Grant access**. + + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/bitbucket-authorize.png" +url="/images/administration/create-account/bitbucket-authorize.png" +alt="Bitbucket authorization page" +caption="Bitbucket authorization page" +max-width="50%" +%} + + * For GitLab: To continue, click **Authorize**. + + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/gitlab-authorize.png" +url="/images/administration/create-account/gitlab-authorize.png" +alt="GitLab authorization page" +caption="GitLab authorization page" +max-width="50%" +%} + + Once you confirm the permissions for your Git provider, Codefresh automatically connects to your Git provider and fetches your basic account details, such as your email. + +{:start="2"} +1. Continue with [Step 3: Verify account details](#step-3-verify-account-details). + +## Step 3: Verify account details + +Verifying account details is the final step in creating your Codefresh account. + +1. Review the details for your new account, make the relevant changes, and click **NEXT**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-signup.png" +url="/images/administration/create-account/codefresh-signup.png" +alt="Codefresh account details" +caption="Codefresh account details" +max-width="40%" +%} + +{:start="2"} +1. Enter a name for your account, and click **NEXT**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-accountname.png" +url="/images/administration/create-account/codefresh-accountname.png" +alt="Codefresh account name" +caption="Codefresh account name" +max-width="40%" +%} + +{:start="3"} +1. Finally, answer the questions to personalize your account and click **FINISH**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-personalize.png" +url="/images/administration/create-account/codefresh-personalize.png" +alt="Codefresh personalize account" +caption="Codefresh personalize account " +max-width="40%" +%} + +Congratulations! Your new Codefresh account is now ready. 
+
+{% include
+image.html
+lightbox="true"
+file="/images/administration/create-account/codefresh-dashboard.png"
+url="/images/administration/create-account/codefresh-dashboard.png"
+alt="Codefresh dashboard"
+caption="Codefresh dashboard"
+max-width="40%"
+%}
+
+
+
+
+## Related articles
+[Adding users and teams]({{site.baseurl}}/docs/administration/account-user-management/add-users/)
+[Configuring access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/)
+[Codefresh IP addresses]({{site.baseurl}}/docs/administration/account-user-management/platform-ip-addresses/)
+[Create a basic pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/)
+[Pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/)
+[Deploy to Kubernetes]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/)
+
+
diff --git a/_docs/administration/hosted-authorize-orgs.md b/_docs/administration/account-user-management/hosted-authorize-orgs.md
similarity index 100%
rename from _docs/administration/hosted-authorize-orgs.md
rename to _docs/administration/account-user-management/hosted-authorize-orgs.md
diff --git a/_docs/administration/oauth-setup.md b/_docs/administration/account-user-management/oauth-setup.md
similarity index 92%
rename from _docs/administration/oauth-setup.md
rename to _docs/administration/account-user-management/oauth-setup.md
index be8d4e21d..632cf4a72 100644
--- a/_docs/administration/oauth-setup.md
+++ b/_docs/administration/account-user-management/oauth-setup.md
@@ -1,7 +1,8 @@
 ---
-title: "Set up OAuth2 authentication for Git providers"
+title: "Setting up OAuth2 for Git providers"
 description: ""
 group: administration
+sub_group: account-user-management
 toc: true
 ---
 
@@ -32,7 +33,7 @@ Codefresh supports OAuth2 or personal access tokens (PATs) for authentication:
 
 
 
-### Authentication for Git providers and runtime accounts
+## Authentication for Git providers and runtime accounts
 The [Authentication](https://g.codefresh.io/2.0/account-settings/authentication?providerName=github){:target="\_blank"} page displays the accounts by Git provider and the authentication method selected for the same.
 
 Authentication accounts are organized by Runtimes. A runtime can have a single authentication account.
@@ -50,7 +51,7 @@ The Type column identifies the authentication for the provider account as either
 As the account administrator, you can change the authentication method for an account at any time to either Codefresh or Custom, or manual token entry.
 See [Select authentication mechanism for runtime](#select-authentication-mechanism-for-runtime).
 
-### Create a custom OAuth2 Application for Git provider
+## Create a custom OAuth2 Application for Git provider
 Create a custom OAuth2 Application for Codefresh in your Git provider accounts with the correct scopes, and set up authentication for the same within Codefresh. Users in Codefresh can then authorize access to the Git provider using OAuth2, instead of a personal access token.
 
 Supported Git providers:
@@ -71,7 +72,7 @@ To set up OAuth2 authorization in Codefresh, you must:
{:/} -#### Step 1: Create a custom OAuth2 Application in Git +### Step 1: Create a custom OAuth2 Application in Git Create and register an OAuth App under your organization to authorize Codefresh. 1. Follow the step-by-step instructions for your Git provider: @@ -111,7 +112,7 @@ Create and register an OAuth App under your organization to authorize Codefresh.
{:/} -#### Step 2: Create a K8s secret resource in the runtime cluster +### Step 2: Create a K8s secret resource in the runtime cluster Create a K8s secret in the runtime cluster, using the example below as a guideline. You must define the application ID (`appId`), client ID (`clientId`) and the client secret (`clientSecret`) from the OAuth2 Application you created in your Git provider, and the Git URL (`url`). > All fields in the secret _must be_ encoded in `base64`. @@ -154,7 +155,7 @@ data:
{:/} -#### Step 3: Configure OAuth2 settings for Custom Application in Codefresh +### Step 3: Configure OAuth2 settings for Custom Application in Codefresh Configure the settings for the Custom OAuth2 Application in Codefresh. Configuring the settings creates a K8s ConfigMap that references the OAuth secret credentials. When configuring the settings, you can work in Form mode, or directly update the YAML manifest. @@ -210,9 +211,16 @@ Configure the settings for the Custom OAuth2 Application in Codefresh. Configuri You have completed the setup to authorize Codefresh as an OAuth App for your Git provider. -### Select authentication mechanism for runtime +## Select authentication mechanism for runtime For a Git provider and a runtime account, select the authentication mechanism: Codefresh account, Custom provider account if one exists, or token-based authentication. 1. In the Codefresh UI, go to [Authentication](https://g.codefresh.io/2.0/account-settings/authentication?providerName=github){:target="\_blank"}. 1. Select the runtime, and click ![](/images/administration/users/icon-Edit.png?display=inline-block) **Edit**. -1. Select the OAuth authentication provider account. \ No newline at end of file +1. Select the OAuth authentication provider account. + + +## Related articles +[Adding users and teams]({{site.baseurl}}/_docs/administration/account-user-management/add-users/) +[Configuring access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/) +[Codefresh IP addresses]({{site.baseurl}}/docs/administration/account-user-management/platform-ip-addresses/) + \ No newline at end of file diff --git a/_docs/administration/platform-ip-addresses.md b/_docs/administration/account-user-management/platform-ip-addresses.md similarity index 60% rename from _docs/administration/platform-ip-addresses.md rename to _docs/administration/account-user-management/platform-ip-addresses.md index 2676689f7..aa8e73ef6 100644 --- a/_docs/administration/platform-ip-addresses.md +++ b/_docs/administration/account-user-management/platform-ip-addresses.md @@ -2,17 +2,18 @@ title: "Codefresh IP addresses" description: " " group: administration +sub_group: account-user-management toc: true --- Access to Kubernetes clusters behind strict firewalls not accessible from the public internet is governed through authorized IP addresses. Codefresh provides a list of IP addresses to be configured on clusters to allow access to them. -You can register multiple external clusters to Codefresh runtimes, both hosted and hybrid. All runtimes require Codefresh platform IPs to be configured on the clusters. -In addition, managed clusters registered to hosted runtimes must be configured with a set of specific IP addresses to authorize access. +You can register multiple external clusters to the Codefresh Runner and GitOps Runtimes. All Runtimes require Codefresh platform IPs to be configured on the clusters. +In addition, managed clusters registered to Hosted GitOps Runtimes must be configured with a set of specific IP addresses to authorize access. -### Codefresh platform IPs (updated July 31st 2021) +## Codefresh platform IPs (updated July 31st 2021) All the IPs are NAT gateways, and need to enable specific IPs instead of ranges. @@ -37,14 +38,23 @@ All the IPs are NAT gateways, and need to enable specific IPs instead of ranges. 
- 44.228.66.171 - 44.238.167.159 - 44.237.63.217 +- 44.237.63.217 +- 52.6.148.44 +- 52.73.90.9 +- 52.72.0.154 +- 52.73.76.60 +- 3.228.62.77 +- 44.205.132.73 +- 34.235.30.144 +- 54.160.88.80 -### Codefresh IPs for managed clusters in hosted runtimes +## Codefresh IPs for Hosted GitOps Runtimes - 34.207.5.18 - 34.232.79.230 - 44.193.43.5 -### Define API access to IPs for clusters +## API access to IPs for clusters Clusters must be configured with API access to the authorized Codefresh IPs. If you haven't configured your clusters with the required IPs, use the links below to complete the configuration for the clusters listed: @@ -54,7 +64,8 @@ If you haven't configured your clusters with the required IPs, use the links bel [GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters){:target="\_blank"} -### What to read next -[Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/) -[Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/) \ No newline at end of file +## What to read next +[Codefresh Runner installation]({{site.baseurl}}/docs/installation/codefresh-runner/) +[Set up a Hosted GitOps Runtime]({{site.baseurl}}/docs/installation/hosted-runtime/) +[Install Hybrid GitOps Runtimes]({{site.baseurl}}/docs/runtime/hybrid-gitops/) + \ No newline at end of file diff --git a/_docs/administration/add-users.md b/_docs/administration/add-users.md deleted file mode 100644 index be1033730..000000000 --- a/_docs/administration/add-users.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "Manage users" -description: "" -group: administration -toc: true ---- - -If you have a Codefresh account, you can add any number of users to collaborate on repositories, workflows, and pipelines. - -Adding a user requires assigning a role, and optionally, an SSO provider: - -**Role**: Defines the user's access level, and is by default set to User. The Administrator role has full access to account settings. -For guidelines on access control, see [Access control]({{site.baseurl}}/docs/administration/access-control/). - -**SSO**: By default, SSO is not enabled for users. If required, explicitly select the SSO provider. For an overview of SSO, see [Single Sign on]({{site.baseurl}}/docs/administration/single-sign-on/). - -### Add a user -1. In Codefresh, click **Account Settings**. -1. From the sidebar, select [Collaboration](https://g.codefresh.io/2.0/account-settings/users){:target="\_blank"}. - - {% include - image.html - lightbox="true" - file="/images/administration/users/users-list.png" - url="/images/administration/users/users-list.png" - alt="Users list" - caption="Users list" - max-width="40%" - %} - -{:start="3"} -1. Select **Users**, and then select **+ [Add User]**. - - {% include - image.html - lightbox="true" - file="/images/administration/users/invite-user.png" - url="/images/administration/users/invite-user.png" - alt="Add new user" - caption="Add new user" - max-width="40%" - %} - - 1. Type the **User's email address**. - 1. **Assign a role**, by selecting either **User** or **Administrator**. - 1. If SSO is configured for the account, **Select SSO provider**. - - -The user receives an email invitation, and the Users page is updated with information on the user. -The Status column shows Invite sent until the user accepts the invitation, when the user account is created. 
- -> For invitations pending accept, select ![](/images/administration/users/icon-Send.png?display=inline-block) **Resend invite**. - To edit account settings, select ![](/images/administration/users/icon-Edit.png?display=inline-block) **Edit**. - To remove the user account, select ![](/images/administration/users/icon-Delete.png?display=inline-block) **Delete**. - - -### Troubleshoot add users -[User is prompted to enter an organization name](https://support.codefresh.io/hc/en-us/articles/360020177959-User-is-prompted-to-enter-an-organization-name) -[Account invitation not permitting login](https://support.codefresh.io/hc/en-us/articles/360015251000-Account-invitation-not-permitting-login) - diff --git a/_docs/administration/audit.md b/_docs/administration/audit.md deleted file mode 100644 index eca087594..000000000 --- a/_docs/administration/audit.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Audit" -description: "" -group: administration -toc: true ---- - -Most entities in Codefresh are GitOps-compliant, and fully controlled via the GitOps approach. - -For information on which entities and how they are controlled, review [access control]({{site.baseurl}}/docs/administration/access-control/). - -Audit logs are available for GitOps-compliant entities. - -View audit logs: - -* Of Git Sources, in the **Notifications** panel -* Of pipeline entities, in the **Update History** tab -* In your Git repository - -### Git Source changes in Notifications -The **Notifications** panel is a pull-down panel, always available in the Codefresh toolbar. The panel shows a recent view of changes to entities such as Git Sources. - - -{% include -image.html -lightbox="true" -file="/images/administration/audit/notifications.png" -url="/images/administration/audit/notifications.png" -alt="Git Sources change log in Notifications" -caption="Git Sources change log in Notifications" -max-width="30%" -%} - -### Pipeline entity changes in Update History -When you drill down into a pipeline, the **Update History** tab shows the list of changes to all its underlying entities. - -{% include -image.html -lightbox="true" -file="/images/administration/audit/update-history.png" -url="/images/administration/audit/update-history.png" -alt="Pipeline entity change log in Update History" -caption="Pipeline entity change log in Update History" -max-width="30%" -%} - - -### Git repo change log - -A change to a GitOps-controlled resource in Codefresh is made by Codefresh impersonating and pushing commits to your Git Sources. -The Git repository linked to the Git Source shows all the commits. - - -### (Future) Centralized audit log in account settings -We plan to create a centralized location from which to view all API operations. - diff --git a/_docs/administration/single-sign-on/sso-azure.md b/_docs/administration/single-sign-on/sso-azure.md deleted file mode 100644 index 95e790fce..000000000 --- a/_docs/administration/single-sign-on/sso-azure.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: "Azure Single Sign-On (SSO)" -description: " " -group: administration -sub_group: single-sign-on -redirect_from: - - /docs/enterprise/single-sign-on/sso-azure/ -toc: true ---- - -Setting up SSO for Azure in Codefresh, requires you to register Codefresh in Azure AD with the required permissions and the client secret, configure the SSO settings in Codefresh, and then define the Client ID in Azure AD. -For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). 
- -### Prerequisites -* Azure user roles: *Application Administrator* or *Global Administrator* roles. - These roles are required after the SSO integration is complete to [sync teams from Azure to Codefresh]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). - - -### Register Codefresh in Azure AD -Register the Codefresh application in Azure AD. - -1. Log in to **Azure Portal**, and from the sidebar, select **Azure Active Directory**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/1-azure-service.png" - url="/images/administration/sso/azure/1-azure-service.png" - alt="Azure Active Directory" - caption="Azure Active Directory" - max-width="30%" - %} - -{:start="2"} -1. From the sidebar, select **App registrations**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/2-app-registrations.png" - url="/images/administration/sso/azure/2-app-registrations.png" - alt="Azure App Registrations" - caption="Azure App Registrations" - max-width="30%" - %} - -{:start="3"} -1. To add the new application, select **+ New registration**. - Enter a name for the application, e.g. Codefresh, and for all other options, retain default settings. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/3-register-an-app.png" - url="/images/administration/sso/azure/3-register-an-app.png" - alt="Azure App Registration creation" - caption="Azure App Registration creation" - max-width="30%" - %} -{:start="4"} -1. To apply your changes, select **Register**. The application is now registered in Azure AD. - - -### Configure permissions for Codefresh - -After registering Codefresh, configure the permissions. - -1. Select the application name to open **Settings**. -1. Select **API permissions**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/5-api-permissions.png" - url="/images/administration/sso/azure/5-api-permissions.png" - alt="Azure App API Permissions" - caption="Azure App API Permissions" - max-width="30%" - %} -{:start="3"} -1. To change access levels, select **Add a permission**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/6-request-api-permissions.png" - url="/images/administration/sso/azure/6-request-api-permissions.png" - alt="Azure App Change Permissions" - caption="Azure App Change Permissions" - max-width="30%" - %} -{:start="4"} -1. Find and select **Azure Active Directory Graph**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/7-request-api-permissions.png" - url="/images/administration/sso/azure/7-request-api-permissions.png" - alt="Azure Active Directory Graph entry" - caption="Azure Active Directory Graph entry" - max-width="30%" - %} -{:start="5"} -1. Select **Application permissions**, and select the following permissions: - * `Directory.Read.All` - * `Group.Read.All` - * `User.Read.All` - - >Note: - User.Read for the type of delegated is required. This permission is usually added by default. - -{:start="6"} -1. Select **Apply Permissions**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/ApplicationPermissions.png" - url="/images/administration/sso/azure/ApplicationPermissions.png" - alt="API Permissions" - caption="API Permissions" - max-width="30%" - %} - -{:start="7"} -1. From the bar on the top, select **Grant admin consent**. - -### Create Client Secret - -1. 
From the sidebar, select **Certificates & secrets**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/9-Create-secret-page.png" - url="/images/administration/sso/azure/9-Create-secret-page.png" - alt="Change keys" - caption="Change keys" - max-width="30%" - %} -{:start="2"} -1. Select **New Client secret**, and add a description (arbitrary name). - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/10-Add-client-secret.png" - url="/images/administration/sso/azure/10-Add-client-secret.png" - alt="Add a client secret" - caption="Add a client secret" - max-width="30%" - %} -{:start="3"} -1. Select the desired duration. - >**Important:** If you select a key with an expiration date, record the expiration date in your calendar. Remember to renew the key before the expiration date to ensure that users don't experience a service interruption. -1. To display the key, select **Add**. -1. Copy the value of the key as you will need this when you configure the SSO settings for Azure in Codefresh. - -### Configure SSO for Azure in Codefresh - -1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. -1. Select **+ Add Single Sign-On**, select **Azure**, and then select **Next**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/sso-csdp-azure.png" - url="/images/administration/sso/azure/sso-csdp-azure.png" - alt="SSO settings for Azure in Codefresh" - caption="SSO settings for Azure in Codefresh" - max-width="30%" - %} - - * **Client Name**: For auto-generation, leave empty. Codefresh generates the client name once you save the settings. - * **Display Name**: Meaningful name that identifies the SSO provider. - * **Application ID**: The Application ID in Azure - * **Client secret**: The key value you copied when you created the client secret in Azure - * **Tenant**: `.onmicrosoft.com` - * **Object ID**: Your Azure Service Principal Object ID (from Enterprise Application configuration) - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/client-name.png" - url="/images/administration/sso/azure/client-name.png" - alt="SSO Client Name" - caption="SSO Client Name" - max-width="50%" - %} - You need this value when you configure the reply URL in the Azure portal. - -### Configure reply URLs -This is the final step in SSO setup for Azure. Add the Codefresh callback URL to the allowed reply URLs for the created application in Azure AD. -1. Go to **Azure Active Directory > Apps registrations**, and select your app. -1. Select **Add a Redirect URI**, and define: - - ``` - https://g.codefresh.io/api/auth//callback - - ``` - - where: `` is the Client Name in the SSO configuration, either defined by you or created by Codefresh. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/12-set-reply-URL.png" - url="/images/administration/sso/azure/12-set-reply-URL.png" - alt="Reply URLs" - caption="Reply URLs" - max-width="30%" - %} -{:start="3"} -1. On the same page, scroll down and select **ID tokens**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/13-Enable-ID-Tokens.png" - url="/images/administration/sso/azure/13-Enable-ID-Tokens.png" - alt="Reply URLs" - caption="Reply URLs" - max-width="30%" - %} - -You have now completed the SSO setup for Azure. 
- -##### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. diff --git a/_docs/administration/single-sign-on/sso-setup-oauth2.md b/_docs/administration/single-sign-on/sso-setup-oauth2.md deleted file mode 100644 index 93d4f5fc4..000000000 --- a/_docs/administration/single-sign-on/sso-setup-oauth2.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: "Setting Up OpenID Connect Federated Single Sign-On (SSO)" -description: "" -group: administration -sub_group: single-sign-on -redirect_from: - - /docs/sso/sso-setup-oauth2/ - - /docs/enterprise/single-sign-on/sso-setup-oauth2/ -toc: true ---- - -Codefresh natively supports login using GitHub, Bitbucket and GitLab using the OpenID Connect (OAUTH 2.0) protocol. You can add new SSO integrations based on OAUTH 2.0 as part of the Codefresh Enterprise plan. - - -### Prerequisites - -To successfully add an identity provider in Codefresh, you must configure settings both for the identity provider and in Codefresh. -You need to: - -1. Configure your identity provider to provide SSO services to Codefresh. The configuration differs per identity provider. -1. Set up Codefresh to point to your identity provider, common for all identity providers. - -> SSO is only available to Enterprise customers. Please [contact sales](https://codefresh.io/contact-sales/) in order to enable it for your Codefresh account. - -### SSO configuration using OAuth2 - -SSO configuration in Codefresh is similar regardless of the identity provider selected. These settings are common to all providers: - -* **Display Name**: The name of your identity provider -* **Client ID**: The ID used for the connection -* **Client Secret**: The secret associated with the ID - -For detailed information on how to configure SSO for your identity provider, see the following: - -[Azure]({{site.baseurl}}/docs/administration/single-sign-on/sso-azure/) -[Google]({{site.baseurl}}/docs/administration/single-sign-on/sso-google/) -[Okta]({{site.baseurl}}/docs/administration/single-sign-on/sso-okta/) -[OneLogin]({{site.baseurl}}/docs/administration/single-sign-on/sso-onelogin/). - - -### Test SSO with your identity provider - -Once you configure SSO for your identity provider, do the following: -1. On the sidebar, below **User Management**, select **People**. -1. Add an active user for testing purposes. We recommend you use your own user. -1. Change Login method by selecting your Auth provider in the SSO drop-down. - - {% include image.html -lightbox="true" -file="/images/administration/sso/collaborators.png" -url="/images/administration/sso/collaborators.png" -alt="Adding collaborators" -caption="Adding collaborators" -max-width="30%" -%} - -{:start="3"} -1. Keep the current browser session open, and log in via Corporate SSO in an incognito tab (or another browser). - - {% include image.html -lightbox="true" -file="/images/administration/sso/sign-with-sso.png" -url="/images/administration/sso/sign-with-sso.png" -alt="Sign-in with SSO" -caption="Sign-in with SSO" -max-width="50%" -%} - -{:start="4"} -1. If everything works as expected, add more users. - ->Before enabling SSO for all users, you **MUST** make sure that it works for the test user. Once SSO is enabled for a user, Codefresh blocks logins through other IDPs for this user, and only allows login through the enabled SSO. 
If the selected SSO method does not work for some reason, the user is locked out of Codefresh. - - -## Select SSO method for collaborators - -To add users and select their SSO method, from the sidebar, select **Collaborators**. Then add the user's email or Codefresh username. -In addition to their role, you can now select the SSO method to use: - - {% include image.html -lightbox="true" -file="/images/administration/sso/select-user-sso.png" -url="/images/administration/sso/select-user-sso.png" -alt="Selecting SSO method" -caption="Selecting SSO method" -max-width="50%" -%} - -**SSO login for new and existing users** -If you have multiple SSO providers configured, you can select a different provider for each user if so required. - -* New users - If you have an SSO provider selected as the default, that provider is automatically assigned to new users, added either manually or via team synchronization. - -* Existing users - SSO login is not configured by default for existing users. You must _explicitly select_ the SSO provider for existing users. - If SSO login is already configured for an existing user, and you add a new identity provider, to change the SSO login to the new provider, you must _select_ the new provider for the user. - - -### Define a default identity provider - -If you have multiple identity providers for SSO, you can define one of them as your default provider. -When you define a default provider: -* The SSO method is automatically selected for all newly invited users -* All new users receive an email with an invite link that points directly to the login page of that SSO provider - - -1. Mouse over the top-right of the SSO screen - - {% include image.html -lightbox="true" -file="/images/administration/sso/default-sso.png" -url="/images/administration/sso/default-sso.png" -alt="Default SSO provider" -caption="Default SSO provider" -max-width="90%" -%} - -### Sync teams after initial SSO setup - -Once the initial setup is done, you can also sync your teams between Codefresh and the identity provider. -You can do this via the [Codefresh Cli](https://codefresh-io.github.io/cli/), using the [sync command](https://codefresh-io.github.io/cli/teams/synchronize-teams/). - -For example, to sync you azure teams you can execute: - -``` -codefresh synchronize teams my-client-name -t azure - -``` - -You can find the client-name from the SSO UI. - -{% include image.html -lightbox="true" -file="/images/administration/sso/azure/client-name.png" -url="/images/administration/sso/azure/client-name.png" -alt="SSO Client Name" -caption="SSO Client Name" -max-width="40%" -%} - -Even though you can run this command manually, it makes more sense to run it periodically as a job. And the obvious -way to perform this is with a Codefresh pipeline. The CLI can be used as a [freestyle step]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/). - -You can create a git repository with a [codefresh.yml]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) file with the following contents: - -`YAML` -{% highlight yaml %} -{% raw %} -version: '1.0' -steps: - syncMyTeams: - title: syncTeams - image: codefresh/cli - commands: - - 'codefresh synchronize teams my-client-name -t azure' -{% endraw %} -{% endhighlight %} - -To fully automate this pipeline, set a [cron trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/cron-triggers/) for this pipeline. The cron-trigger is responsible for running this pipeline, (and therefore synchronizing the teams), in a fully automated manner. 
-This way you can synchronize your teams every day/week/hour depending on you cron trigger setup. - diff --git a/_docs/administration/single-sign-on/sso-setup-saml2.md b/_docs/administration/single-sign-on/sso-setup-saml2.md deleted file mode 100644 index 3e674085c..000000000 --- a/_docs/administration/single-sign-on/sso-setup-saml2.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Setting Up SAML2 Federated Single Sign-On (SSO)" -description: "" -group: administration -sub_group: single-sign-on -redirect_from: - - /docs/sso/sso-setup-saml2/ - - /docs/enterprise/single-sign-on/sso-setup-saml2/ -toc: true ---- - -Codefresh natively supports login using GitHub, Bitbucket and GitLab using the OpenID Connect (OAUTH 2.0) protocol. You can add new SSO integrations based on OAUTH 2.0 as part of Codefresh Enterprise plan. - -As Identity Providers (IdPs) come in all shapes and sizes, the following topic discusses in general what you must do to configure Federated SSO. - As you will see in the description below, the person in your organization responsible for managing your IdP will need to interact with Codefresh support team to successfully set up a trust between your IdP and Codefresh SP. - -{:.text-secondary} -### Before you set up Federated SSO - 1. Have your account set up with Codefresh enterprise plan. - 2. Ensure you have a working SAML 2.0 compliant identity provider (IdP). - 3. Identify someone in your organization who is familiar with configuring and managing your organization's IdP. - 4. Ensure that your IdP's system clock is synchronized with a reliable time source. If it's not, tokens generated will be unusable and SSO will fail. - -{:.text-secondary} -### Summary of Federated SSO setup - -{% include image.html - lightbox="true" - file="/images/sso-flow.png" - url="/images/sso-flow.png" - alt="sso-flow.png" - max-width="100%" -%} - -{:.text-secondary} -### SAML attributes - -Codefresh expects the following user attributes to be passed through SAML between your IdP and Codefresh SP: - - User email address - - User first name - - User last name - - User full name - - User unique ID that isn't subject to change in your identity management environment - -{:.text-secondary} -## How does the connection process work? - - {% include image.html -lightbox="true" -file="/images/sso-diagram.png" -url="/images/sso-diagram.png" -alt="sso-diagram.png" -max-width="100%" - %} - -Once Federated SSO has been configured, the process works as follows: - -
- - Steps 2 to 7 occur in the background and are transparent to the user. -
- -1. A user logs in to CDSP -2. The user is redirected to Codefresh Service Provider (SP) to initiate SSO -3. The user’s browser is then redirected to the customer IdP -4. Once authenticated by the corporate side, a SAML token is sent to the user’s browser -5. The SAML assertion is then forwarded to Codefresh SP -6. If you are a valid Codefresh user for this SSO connection, an SSO token is returned to the user’s browser -7. The user’s browser then returns a token to Codefresh and access is granted for your account - -### Configure SAML SSO settings in Codefresh - -1. In Codefresh, select **Account settings**. -1. From the sidebar expand **Collaboration**, and select **Single Sign-on**. - OR - Go directly to [https://g.codefresh.io/account-admin/sso](https://g.codefresh.io/account-admin/sso)) - - - {% include image.html - lightbox="true" -file="/images/administration/sso/add-sso-dropdown.png" -url="/images/administration/sso/add-sso-dropdown.png" -alt="SSO provider settings" -caption="SSO provider settings" -max-width="70%" -%} - -{:start="3"} -1. Select **Add single-sign-on**, and then select **SAML**. -1. Enter the following: - - * **Client Name**: For auto-generation, leave empty. Codefresh generates the client name once you save the settings. - * **Display Name**: The name you want to give to this integration. - * **IDP Entry**: The SSO endpoint of your Identity Provider. For Azure SAML, for example, this is the Login URL. - * **Application Certificate**: The security certificate of your Identity Provider. Paste the value directly in the field. Do not convert to base64 or any other encoding by hand. (For Azure SAML, this will be Certificate (Base64) and the value needed is between the -----BEGIN ... and -----END... from the downloaded cert) - * **Assertion URL**: `https://g.codefresh.io/api/auth//callback​` - where ​​ is he client name that is automatically generated when saving the SSO settings. - * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. -1. Select **Save**, and note down the `Client Name` that is generated. - - -### Configure IdP settings for Codefresh as a Service Provider -In the settings of your Identity Provider, create a new Service Provider and provide the following: - - * **Service Provider SSO Endpoint**: Assertion consumer service URL - `https://g.codefresh.io/api/auth//callback` - * **Service Provider Entity ID**: `g.codefresh.io` - -The mandatory fields needed for SAML assertions are: -1. firstName: User's first name -1. lastName: User's last name -1. email: User's email - -To configure users sync for SAML IDP, do the following: - -1. Select a G Suite provider -1. Enable Auto Sync users and teams to Codefresh -1. Set JSON Keyfile, Admin Email and Sync interval - -The instructions for getting the JSON Keyfile, and Admin Email are the same as for [Google SSO]({{site.baseurl}}/docs/administration/single-sign-on/sso-google/#synchronize-teams-with-the-codefresh-cli). - ->Note - These settings are for the SaaS version of Codefresh. For an on-premises setup, use the URLs that match your installation. - -Once everything is finished, you [should test the integration]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider). 
Once it's working, proceed to the next steps that are: - -* [Selecting SSO method for collaborators]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#selecting-sso-method-for-collaborators) - ->Notice that Codefresh has an internal cache for SSO configurations and it might take up to five minutes for your changes to take effect. - -## OneLogin SAML Setup - -1. In OneLogin, go to the [Applications](https://cfsupport.onelogin.com/apps) Section. -1. Select 'Add App' on the top right. -1. Search for 'SAML Custom Connector' (advanced) and select it. -1. Add a Display Name (the rest is optional) and Save. -1. View the SSO Section. -1. Open a New Tab and go to the [Single Sign-On](https://g.codefresh.io/account-admin/sso) settings in Codefresh. -1. In Codefresh, select SAML for the Add Single Sign-On. - * Display Name = any arbitrary name you want to give in this integration. - * IDP Entry = SAML 2.0 Endpoint (HTTP) from the SSO section in OneLogin. - * Application Certificate = X.509 Certificate from the SSO section in OneLogin. - * Click View Details (preferable open in a new tab). - * Under X.509 Certificate, click the copy button. - * Paste the contents into the Application Certificate. - * Remove the -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----. - * Save. -1. Click edit on the SAML integration we created. - * Copy the Assertion URL -1. In OneLogin, view the Configuration section. - * Audience (EntityID) = g.codefresh.io - * Recipient = Assertion URL - * ACS (Consumer) URL Validator= Assertion URL but in Regex form. View OneLogin's [Setup Page](https://onelogin.service-now.com/support?id=kb_article&sys_id=c89fefdadb2310503de43e043996195a&kb_category=93e869b0db185340d5505eea4b961934) for more info. - * ACS (Consumer) URL = Assertion URL - * Login URL = https://g.codefresh.io/login - * SAML Initiator = Service Provider - * Save -1. In OneLogin, Go the [Users](https://cfsupport.onelogin.com/users) page. - * Select the User - * Go to Applications Section - * Click the **+** to add - * Select the SAML App (will show the Display Name from step 7) - * Click Continue - * Make sure NameID value = email address - * Save - -> Once the configuration is complete, please test the integration before enabling the SSO for all users. - - - - - - diff --git a/_docs/administration/user-settings.md b/_docs/administration/user-self-management/manage-pats.md similarity index 57% rename from _docs/administration/user-settings.md rename to _docs/administration/user-self-management/manage-pats.md index c8f130c5e..d4fb426c7 100644 --- a/_docs/administration/user-settings.md +++ b/_docs/administration/user-self-management/manage-pats.md @@ -1,57 +1,25 @@ --- -title: "User settings" +title: "Managing Git PATs" description: "" group: administration +sub_group: user-self-management toc: true --- -As a user in Codefresh, you can manage your account by authorizing access to your Git provider accounts, and optionally, enabling access for Codefresh support. +As a user in Codefresh, you must authorize access to your Git provider accounts, and authenticate Git-based actions from Codefresh clients, per provisioned runtime. +The authorization method depends on the Git provider and on what authorization has been set up by your account admin. +* If your admin has set up authentication with OAuth2, you can authorize access using OAuth2. +* You can always generate a personal access token from your Git provider and then add the same to Codefresh to authorize access. 
-* Enable access for Codefresh support - Optional. Enable access to your account for troubleshooting purposes. - -* Authorize Git providers - The Git personal token is a user-specific access token, required to authenticate Git-based actions from Codefresh clients, per provisioned runtime. - - - The authorization method depends on the Git provider and on what authorization has been set up by your account admin. - - - If your admin has set up authentication with OAuth2, you can authorize access using OAuth2. - Or, you can always generate a personal access token from your Git provider and then add the same to Codefresh to authorize access. - - > If you have access to more than one runtime, you can use the same token for multiple runtimes. - You must however authorize access individually for each runtime. +> If you have access to more than one runtime, you can use the same token for multiple runtimes. + You must however authorize access individually for each runtime. {::nomarkdown}
{:/} -### Enable access for Codefresh support -Enable Codefresh support personnel to access your user account. Access to your account is useful for visibility during troubleshooting. - -You can disable this security setting at any time. - -> Codefresh personnel takes action only after confirmation from you, and all actions are audited. - -1. In the CSDP UI, go to [User Settings](https://g.codefresh.io/2.0/user-settings){:target="\_blank"}. -1. Enable **Allow Codefresh support tem to log in...**. -{% include - image.html - lightbox="true" - file="/images/administration/user-settings/security-enable-support-access.png" - url="/images/administration/user-settings/security-enable-support-access.png" - alt="Enable access for Codefresh support" - caption="Enable access for Codefresh support" - max-width="50%" -%} - -{::nomarkdown} -
-{:/} - -### Authorize Git access in Codefresh +## Authorize Git access in Codefresh Authorize Git access with OAuth2 if your account admin has set up Codefresh as an OAuth application, or alternatively through personal access tokens from your Git provider. >Notes: For OAuth2: The adminstrator pre-configures the permissions and expiry date. Once you supply your credentials for authorization, you are automatically directed to the Git Personal Tokens page. @@ -67,7 +35,7 @@ Make sure you have: **How to** -1. In the Codefresh UI, go to [User Settings](https://g.codefresh.io/2.0/user-settings){:target="\_blank"}. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Git Personal Access Token** (TBD(https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}). 1. Select the runtime, and then do one of the following: * To add a token, select **Add Token**. * To update an existing token by replacing it with a new token, select **Update Token**. @@ -87,7 +55,7 @@ Make sure you have: -{:start="5"} +{:start="4"} 1. Click **Add Token**. In the Git Personal Access Tokens list, you can see that the new token is assigned to the runtime. @@ -95,7 +63,7 @@ Make sure you have:
{:/} -#### Generate GitHub personal access tokens +### Generate GitHub personal access tokens 1. Log in to your GitHub or GitHub Enterprise account. 1. Select **Settings > Developer Settings > Personal Access Tokens > Tokens (classic)**. @@ -107,8 +75,8 @@ Make sure you have: {% include image.html lightbox="true" - file="/images/administration/user-settings/github-pat-scopes.png" - url="/images/administration/user-settings/github-pat-scopes.png" + file="/images/administration/manage-pats/github-pat-scopes.png" + url="/images/administration/manage-pats/github-pat-scopes.png" alt="GitHub personal access token scopes" caption="GitHub personal access token scopes" max-width="50%" @@ -121,7 +89,7 @@ Make sure you have:
{:/} -#### Generate GitLab personal access tokens +### Generate GitLab personal access tokens 1. Log in to your GitLab Cloud or Server account. 1. Select **User settings > Access tokens**. @@ -133,8 +101,8 @@ Make sure you have: {% include image.html lightbox="true" - file="/images/administration/user-settings/gitlab-pat-scopes.png" - url="/images/administration/user-settings/gitlab-pat-scopes.png" + file="/images/administration/manage-pats/gitlab-pat-scopes.png" + url="/images/administration/manage-pats/gitlab-pat-scopes.png" alt="GitLab personal access token scopes" caption="GitLab personal access token scopes" max-width="50%" @@ -150,7 +118,7 @@ Make sure you have:
{:/} -#### Generate Bitbucket personal access tokens +### Generate Bitbucket personal access tokens 1. Log in to your Bitbucket Cloud or Server account. @@ -164,8 +132,8 @@ Make sure you have: {% include image.html lightbox="true" - file="/images/administration/user-settings/bitbucket-pat-scopes.png" - url="/images/administration/user-settings/bitbucket-pat-scopes.png" + file="/images/administration/manage-pats/bitbucket-pat-scopes.png" + url="/images/administration/manage-pats/bitbucket-pat-scopes.png" alt="Bitbucket personal access token scopes" caption="Bitbucket personal access token scopes" max-width="50%" @@ -178,5 +146,5 @@ Make sure you have:
{:/} -### Related articles +## Related articles [Git tokens in Codefresh]({{site.baseurl}}/docs/reference/git-tokens/) \ No newline at end of file diff --git a/_docs/administration/user-self-management/user-settings.md b/_docs/administration/user-self-management/user-settings.md new file mode 100644 index 000000000..32a9bbf97 --- /dev/null +++ b/_docs/administration/user-self-management/user-settings.md @@ -0,0 +1,114 @@ +--- +title: "Manage personal user settings" +description: "Manage your personal settings" +group: administration +sub_group: user-self-management +toc: true +--- + +As a Codefresh user, you can manage several settings in your personal account, including: + +* Email notifications for builds and build usage +* Grant account access to Codefresh support +* Grant access to private Git repositories +* Create and manage API keys + +> To manage Git personal access tokens for GitOps, see [Managing PATs]({{site.baseurl}}/docs/administration/user-self-management/manage-pats). + +## Access user settings +* In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **User Settings** (https://g.codefresh.io/user/settings){:target="\_blank"}. + +## Email notifications for pipeline builds + +Configure the email notifications you want to receive for builds based on the build status: only successful, only failed, or for both successful and failed builds. + +> By default, email notifications for builds are disabled for _all users_. + +* In **Notifications**, define the email address and select the notifications: + * Email address for the notifications. By default, it's the same address you used to [sign up]({{site.baseurl}}/docs/administration/account-user-management/create-a-codefresh-account/). +* Select the build statuses for which to receive notifications. + + + +{% include image.html +lightbox="true" +file="/images/administration/user-settings/notifications.png" +url="/images/administration/user-settings/notifications.png" +alt="Email notifications for pipeline builds" +caption="Email notifications for pipeline builds" +max-width="50%" +%} + + + +## Weekly updates of build usage + +Select to receive weekly summaries of builds across your pipelines along with other statistical data. This information can be useful if you want to understand your overall project build health and capacity usage. + +* In **Updates**, select or clear **Receive updates...**. + + +## Enable access for Codefresh support + +Enable Codefresh support personnel to access your user account. Access to your account is useful for visibility during troubleshooting. If you have an issue with the Codefresh platform, our support personnel can log into your account and look at running builds, inspect Docker images, run pipelines for you etc. + +You can disable this security setting at any time. + +>Codefresh personnel takes action only after confirmation from you, and all actions are audited. + +* In **Security**, select **Allow Codefresh support team to log in…**.. + + +{% include image.html +lightbox="true" +file="/images/administration/user-settings/allow-support-access.png" +url="/images/administration/user-settings/allow-support-access.png" +alt="Allow access to Codefresh support" +caption="Allow access to Codefresh support" +max-width="100%" +%} + + + + +## Create and manage API keys + +Generate new API keys to access Codefresh functionality from your scripts or applications, outside the Codefresh UI. Edit scopes for existing keys, or revoke them when needed. 
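+
+For example, once generated, a key can be passed in the `Authorization` header of an API call. The sketch below lists pipelines with `curl`; it is only an illustration and assumes a key with the relevant read scopes against the public SaaS endpoint:
+
+`Shell`
+{% highlight sh %}
+{% raw %}
+# Replace <your_key_here> with the API key generated in the UI.
+# The key is sent as-is in the Authorization header.
+curl -X GET \
+  --header "Accept: application/json" \
+  --header "Authorization: <your_key_here>" \
+  'https://g.codefresh.io/api/pipelines'
+{% endraw %}
+{% endhighlight %}
+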
+For details, see [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions).
+
+>Tokens are visible only during creation. You cannot "view" an existing token. To re-enable API access for an existing application, you must delete the old token and create a new one.
+
+The UI shows only the first few characters of the second part of the key, after the `.`, and not the characters at the beginning of the key.
+
+
+
+
+1. In **API Keys**, to generate a new API key, click **Generate**.
+1. Select the scopes for the key.
+
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/api/generate-token.png"
+url="/images/integrations/api/generate-token.png"
+alt="Generating a key for the API"
+caption="Generating a key for the API"
+max-width="80%"
+%}
+
+
+
+## Related articles
+
+
+[Manage Git PATs]({{site.baseurl}}/docs/administration/user-self-management/manage-pats/)
+[Single Sign-On]({{site.baseurl}}/docs/administration/single-sign-on/)
+
+
diff --git a/_docs/ci-cd-guides/access-docker-registry-from-kubernetes.md b/_docs/ci-cd-guides/access-docker-registry-from-kubernetes.md
new file mode 100644
index 000000000..157d49ce6
--- /dev/null
+++ b/_docs/ci-cd-guides/access-docker-registry-from-kubernetes.md
@@ -0,0 +1,127 @@
+---
+title: "Accessing Docker registry from Kubernetes cluster"
+description: "Allow Kubernetes to pull Docker images from your registry"
+group: ci-cd-guides
+toc: true
+---
+
+Kubernetes deployments are based on a "pull" approach. When you deploy your application to a Kubernetes
+cluster, instead of uploading the application itself, as in traditional deployments, Kubernetes pulls the Docker images to its nodes on its own.
+
+
+ {% include
+image.html
+lightbox="true"
+file="/images/quick-start/quick-start-k8s/overview.png"
+url="/images/quick-start/quick-start-k8s/overview.png"
+alt="Kubernetes deployments"
+caption="Kubernetes deployments"
+max-width="80%"
+%}
+
+If your Docker images are in a public repository such as Docker Hub, Kubernetes can pull them right away. In most cases, however, your images are in a private Docker registry, and Kubernetes must be given explicit access to it.
+
+Use [Docker registry secrets](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/){:target="\_blank"} to give Kubernetes access to private Docker registries. When there is a deployment, each Kubernetes pod can pull Docker images directly from the target registry.
+
+## Giving access to a Docker Registry via the UI
+
+Codefresh allows you to easily create pull secrets for your cluster.
+
+1. In the Codefresh UI, set up an integration with your [Docker registry in Codefresh]({{site.baseurl}}/docs/integrations/docker-registries/).
+   Codefresh can work with any compliant Docker registry, either in the cloud or behind the firewall.
+
+1. To view the Kubernetes dashboard, from the Ops section in the sidebar, select [**Kubernetes Services**](https://g.codefresh.io/kubernetes/services/){:target="\_blank"}.
+1. Click **Add Service**.
+1. Do the following:
+   * Select your **Cluster** and **Namespace** from the respective lists.
+   * From the **Image Pull Secret** dropdown with all the pull secrets for the selected namespace, select **Create Registry Pull secret**.
+   * From the list of all the connected Docker registries in Codefresh, select the registry you want.
+     Codefresh automatically creates a secret for you.
+ + {% include +image.html +lightbox="true" +file="/images/guides/kubernetes/create-secret.png" +url="/images/guides/kubernetes/create-secret.png" +alt="Create Pull Secret" +caption="Create Pull Secret" +max-width="80%" +%} + + +>The secret is created as soon as you select your Docker registry from the dropdown. There is no need to actually deploy anything from this screen for the changes to take effect. + + {% include +image.html +lightbox="true" +file="/images/guides/kubernetes/secret-dropdown.png" +url="/images/guides/kubernetes/secret-dropdown.png" +alt="Docker Registry Access" +caption="Docker Registry Access" +max-width="80%" +%} + +From now on, the cluster in this namespace can deploy Docker images from the selected registry. +To apply the changed secret, you don't really need to finish the deployment. Feel free to +close the screen and go to another Codefresh page. + +>Codefresh automatically uses the secret you defined in all deployments that are performed via the UI by dynamically creating the correct manifests for you behind the scenes. +If you wish to use your own manifests, you need to include the secret yourself, as explained in the next section. + + +## Giving access to a Docker Registry with kubectl + +You can also use the `kubectl` command directly to give access to a Docker registry. +As this method is not specific to Codefresh, read the [official kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/){:target="\_blank"}. + + +### Creating the Docker registry secret + +The credentials depend upon the [type of registry]({{site.baseurl}}/docs/integrations/docker-registries/) you use. + +- The Docker server to use is a domain such `gcr.io`, `azurecr.io` +- The username is your account username. +- The password is a specific Docker registry password or any other kind of token. You need to check the documentation of your registry provider for the exact details. + +>Be sure to create the secret in the namespace in which your application will run. +Pull secrets are specific to a namespace. If you want to deploy to multiple namespaces, you need to create a secret for each one of them. + +This is an example of creating a pull secret to the Azure registry. You can use the same command for any other private registry. + + `Shell` +{% highlight sh %} +{% raw %} + +export DOCKER_REGISTRY_SERVER=mysampleregistry.azurecr.io +export DOCKER_USER=myregistryname +export DOCKER_PASSWORD=myregistrytoken +export DOCKER_EMAIL=YOUR_EMAIL + +kubectl create secret docker-registry cfcr\ + --docker-server=$DOCKER_REGISTRY_SERVER\ + --docker-username=$DOCKER_USER\ + --docker-password=$DOCKER_PASSWORD\ + --docker-email=$DOCKER_EMAIL +{% endraw %} +{% endhighlight %} + +### Using the Docker registry secret + +To use the secret you just created, you need to include it, either in: + +* Your [pod manifests](https://kubernetes.io/docs/concepts/containers/#specifying-imagepullsecrets-on-a-pod){:target="\_blank"} +* The [service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account){:target="\_blank"} + +For Docker registry secret usage, we recommend following the official Kubernetes documentation. + +## Giving access to a Docker Registry via the Codefresh CLI + +The Codefresh CLI can also create pull secrets in an automated manner. + +See [Image pull Secret](https://codefresh-io.github.io/cli/more/image-pull-secret/){:target="\_blank"}. 
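+
+If you deploy with your own manifests, the pull secret must be referenced explicitly, regardless of how it was created (UI, `kubectl`, or the Codefresh CLI). The following is a minimal sketch of a pod manifest that references the `cfcr` secret from the earlier example; the pod and image names are purely illustrative:
+
+ `pod.yml`
+{% highlight yaml %}
+{% raw %}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: my-app
+spec:
+  containers:
+    - name: my-app
+      # Image hosted in the private registry that the secret gives access to
+      image: mysampleregistry.azurecr.io/my-app:1.0.0
+  imagePullSecrets:
+    # Must match the name of the Docker registry secret in the same namespace
+    - name: cfcr
+{% endraw %}
+{% endhighlight %}
+
+Alternatively, you can add the same `imagePullSecrets` entry to the service account used by your pods, so that individual manifests do not need to repeat it.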
+
+## Related articles
+[Deploy to Kubernetes - quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/)
+[Managing your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/)
+
+
diff --git a/_docs/ci-cd-guides/add-config-maps-to-your-namespaces.md b/_docs/ci-cd-guides/add-config-maps-to-your-namespaces.md
new file mode 100644
index 000000000..c525eba81
--- /dev/null
+++ b/_docs/ci-cd-guides/add-config-maps-to-your-namespaces.md
@@ -0,0 +1,146 @@
+---
+title: "Adding config maps to namespaces"
+description: "Manage Kubernetes Config Maps with Codefresh"
+group: ci-cd-guides
+toc: true
+---
+Many applications require configuration through files, environment variables, and command line arguments, which makes them portable and easy to manage. At the same time, it can become very hard to maintain large numbers of config files for different environments and hundreds of microservices.
+
+Kubernetes provides an elegant and very convenient way for application configuration, using *configuration maps*. You can find more details about config maps at [https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/){:target="_blank"}.
+
+You can manage all your cluster configuration using Codefresh.
+
+## View existing config maps
+
+1. In the Codefresh UI, from the Ops section in the sidebar, select [**Kubernetes Services**](https://g.codefresh.io/kubernetes/services/){:target="\_blank"}.
+1. Switch to list view.
+
+{% include
+image.html
+lightbox="true"
+file="/images/guides/config-maps/change-view.png"
+url="/images/guides/config-maps/change-view.png"
+alt="Change View"
+caption="Change View"
+max-width="50%"
+%}
+
+{:start="3"}
+1. Select a namespace and hover over it.
+1. Click the **Settings** icon that appears at the end of the row.
+   A list of all config maps within this namespace is displayed, including the date of creation and the number of configuration variables inside these maps.
+
+
+
+## Add a new config map
+
+1. From the list of config maps, click **Create a New Config Map**.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/config-maps/manage-maps-namespace.png"
+url="/images/guides/config-maps/manage-maps-namespace.png"
+alt="Create a new config map in namespace"
+caption="Create a new config map in namespace"
+max-width="40%"
+%}
+
+{:start="2"}
+1. In the Add a New Config Map form, enter a **Name**, add variables as described in [Managing variables in your config maps](#managing-variables-in-config-maps), and then click **Create**.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/config-maps/new-config-map-settings.png"
+url="/images/guides/config-maps/new-config-map-settings.png"
+alt="Define settings for new config map"
+caption="Define settings for new config map"
+max-width="40%"
+%}
+
+### Managing variables in config maps
+There are three options to add variables to config maps:
+1. Add a single variable at a time
+1. Add multiple variables by copying and pasting from text or file
+1. Import a set of variables from an existing config map
+
+
+#### Add a single variable to config map
+
+This is the easiest way to add a variable to the config map, and is very useful for quickly creating a small config map with one or two variables:
+1. Enter the key name and the key value.
+1. Click **Add Variable**.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/config-maps/add-new-single-variable.png"
+url="/images/guides/config-maps/add-new-single-variable.png"
+alt="Add single variable at a time to config map"
+caption="Add single variable at a time to config map"
+max-width="40%"
+%}
+
+
+#### Import variables from text/file
+If you already have configuration variables in a `*.property` file, you can easily import them into your config map.
+
+**Import from text**:
+
+
+1. Click **Import from text**.
+1. Copy the text from the file and paste it into the text area in the required format.
+1. Click **Apply**.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/config-maps/import-variables-from-text.png"
+url="/images/guides/config-maps/import-variables-from-text.png"
+alt="Add multiple variables from text or file to config map"
+caption="Add multiple variables from text or file to config map"
+max-width="40%"
+%}
+
+**Import from file**:
+
+1. Click **Import from file**.
+1. Select the file from your computer, and click **Open**.
+
+
+#### Copy variables from existing config map
+
+You can easily copy the variables from an existing config map, and use them in other namespaces.
+
+1. Click **Copy from Existing Config Map**.
+1. Select the **Cluster** and **Namespace** from which to copy the configmap.
+1. Select the configmap from the list, and click **Select**.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/config-maps/select-cluster-namespace.png"
+url="/images/guides/config-maps/select-cluster-namespace.png"
+alt="Copy variables from existing config map"
+caption="Copy variables from existing config map"
+max-width="40%"
+%}
+
+### Edit/remove variables in config maps
+You can easily edit or remove variables in your config maps.
+
+1. Select the config map with the variables to modify or remove.
+1. Click the **Edit** (pencil) icon.
+1. Add new variables, as described in [Managing variables in your config maps](#managing-variables-in-config-maps).
+
+{% include image.html
+lightbox="true"
+file="/images/guides/config-maps/edit-remove-config-map-variables.png"
+url="/images/guides/config-maps/edit-remove-config-map-variables.png"
+alt="Edit/remove variables in config maps"
+caption="Edit/remove variables in config maps"
+max-width="40%"
+%}
+
+To remove a config map, click the "remove" icon in the selected row. After you confirm, the config map is removed.
+
+## Related articles
+[Connect to your Kubernetes cluster]({{site.baseurl}}/docs/integrations/add-kubernetes-cluster/)
+[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/)
+[Deploy to Kubernetes - quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/)
diff --git a/_docs/ci-cd-guides/building-docker-images.md b/_docs/ci-cd-guides/building-docker-images.md
new file mode 100644
index 000000000..4c60597b0
--- /dev/null
+++ b/_docs/ci-cd-guides/building-docker-images.md
@@ -0,0 +1,440 @@
+---
+title: "Building Docker images"
+description: "Create Docker images from Dockerfiles"
+group: ci-cd-guides
+toc: true
+---
+
+Codefresh has first-class Docker build support. You can build Docker images in your pipeline in a declarative manner using the [build step]({{site.baseurl}}/docs/pipelines/steps/build/).
+
+>If your application is not deployed as a Docker image, see the [basic compilation/packaging guide]({{site.baseurl}}/docs/ci-cd-guides/packaging-compilation/) instead.
+ +Building a Dockerfile in a pipeline works the same way as building the Dockerfile locally on your workstation. +Your Dockerfile should be valid, and follow all the best practices such as: +* Dockerfiles should be self-contained +* You should not have actions with side effects inside Dockerfiles +* You should have a proper `.dockerignore` file to minimize the Docker context size +* Dockerfile directives should be placed according to best practices for caching + +For more details, see also the [Caching in pipelines]({{site.baseurl}}/docs/pipelines/pipeline-caching/#distributed-docker-layer-caching). + At the very least, you should understand and use [Docker multistage builds](https://docs.docker.com/develop/develop-images/multistage-build/){:target="\_blank"} (although Codefresh supports all kinds of Dockerfiles natively). Basically, if your Dockerfile is already optimized on your local workstation, it should also be optimized for Codefresh. + +Codefresh uses the standard Docker daemon (or optionally Buildkit) behind the scenes, so if your Dockerfile has issues when you try to build it locally, it will have the same issues in a pipeline. + +## Docker packaging strategies + +There are many ways to create a Dockerfile, and most organizations typically follow a different path depending on the type of application they package. +Brand-new applications are very easy to package into multistage Dockerfiles, while legacy/existing applications are adapted to dockerfiles that package an existing artifact. + +We suggest spending some more time and creating multistage builds for all applications (even legacy ones). +Explaining all virtues of multistage Docker builds is outside the scope of this article but in summary, multistage builds: + +1. Are self-contained and self-describable +1. Result in a very small Docker image +1. Can be easily built by all project stakeholders, even non-developers +1. Are very easy to understand and maintain +1. Do not require a development environment, apart from the source code itself +1. Can be packaged with very simple pipelines, not only in Codefresh, but in other CI systems as well + +Multi-stage builds are also essential in organizations that employ multiple programming languages. The ease of building a Docker image by anyone without the need for JDK/Node/Python/etc. cannot be overstated. + +## Production-ready Docker images with multistage builds + +If you have a multistage Dockerfile, then the respective pipeline in Codefresh is straightforward. You only need two pipeline steps: + +1. A clone step to check out the source code +1. 
A build step to create the Docker image + +For example, here is a [Java dockerfile]({{site.baseurl}}/docs/example-catalog/ci-examples/java/spring-boot-2/#spring-boot-2-and-docker-multi-stage-builds): + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM maven:3.5.2-jdk-8-alpine AS MAVEN_TOOL_CHAIN +COPY pom.xml /tmp/ +RUN mvn -B dependency:go-offline -f /tmp/pom.xml -s /usr/share/maven/ref/settings-docker.xml +COPY src /tmp/src/ +WORKDIR /tmp/ +RUN mvn -B -s /usr/share/maven/ref/settings-docker.xml package + +FROM java:8-jre-alpine + +EXPOSE 8080 + +RUN mkdir /app +COPY --from=MAVEN_TOOL_CHAIN /tmp/target/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] + +{% endraw %} +{% endhighlight %} + +The Codefresh pipeline that builds this Dockerfile is the following: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'multi-stage' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +The beauty of this pipeline is that it is *exactly the same* for all multistage Dockerfiles regardless of the programming language that you use. So even if the Dockerfile was packaging a Node or Go application, the pipeline is oblivious to it. + +{% include image.html +lightbox="true" +file="/images/guides/build-docker-images/multi-stage-pipeline.png" +url="/images/guides/build-docker-images/multi-stage-pipeline.png" +alt="Multi-stage Docker builds" +caption="Multi-stage Docker builds" +max-width="100%" +%} + +You can find multistage build examples for other programming languages in the [example section]({{site.baseurl}}/docs/example-catalog/ci-examples/examples/). + + +## Creating self-contained Docker images + +Even though multistage Dockerfiles are the optimal way to build Docker images, Codefresh still supports "plain" Dockerfiles which do not have multiple stages. + +As an example, this Dockerfile for a Python application is created from a single parent image (although we use the slim variant to make the final image size smaller). + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM python:3.6-slim + +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 +RUN mkdir /code +WORKDIR /code +RUN pip install --upgrade pip +COPY requirements.txt /code/ + +RUN pip install -r requirements.txt +COPY . /code/ + +EXPOSE 8000 + +CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"] +{% endraw %} +{% endhighlight %} + + +This Dockerfile can be built in the same way as a multistage one. We still need two pipeline steps, one to check out the code and another to build the Docker image. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build +steps: + main_clone: + title: Cloning main repository... 
+ stage: prepare + type: git-clone + repo: 'codefreshdemo/cf-example-python-django' + revision: master + git: github + build_my_image: + title: Building Docker Image + stage: build + type: build + image_name: my-django-image + working_directory: ./ + tag: master + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +The pipeline is similar to the previous one, so you can handle multistage and non-multistage builds in the same manner in Codefresh pipelines. + +{% include image.html +lightbox="true" +file="/images/guides/build-docker-images/non-multi-stage-pipeline.png" +url="/images/guides/build-docker-images/non-multi-stage-pipeline.png" +alt="Non Multi-stage Docker builds" +caption="Non Multi-stage Docker builds" +max-width="100%" +%} + +It is important however to note that the Dockerfile is still self-contained. It depends only on the source code of the application and all instructions needed to package the code are included in the Dockerfile itself. + + + +## Packaging existing artifacts in Docker images + +An alternative way to create Docker images is to just package an existing artifact or application which is created earlier in the CI process. + +>Though this is a very popular way to create Dockerfiles, and Codefresh supports it, we do **NOT** recommend writing Dockerfiles like this. Please learn about Docker multistage builds if you are not familiar with them. + +You can see this pattern in all kinds of Dockerfiles that assume the application is already there (or that dependencies are already downloaded). Here is a [Dockerfile that packages an existing JAR]({{site.baseurl}}/docs/example-catalog/ci-examples/java/spring-boot-2/#spring-boot-2-and-docker-package-only) file. + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM java:8-jre-alpine + +EXPOSE 8080 + +RUN mkdir /app +COPY target/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] + +HEALTHCHECK --interval=1m --timeout=3s CMD wget -q -T 3 -s http://localhost:8080/actuator/health/ || exit 1 +{% endraw %} +{% endhighlight %} + +If you have Dockerfiles like this you need to enrich the basic pipeline shown in the previous sections and run a freestyle step that prepares the artifact **BEFORE** building the Docker image. Read more about [freestyle steps in the basic CI process]({{site.baseurl}}/docs/ci-cd-guides/packaging-compilation/). + + +There are several disadvantages to these kinds of Dockerfiles: + +* The Dockerfile is not self-contained anymore. You need to manually run some other command before actually running the Docker build +* A person who wants to build the Docker image on their workstation is also forced to have a full dev environment (e.g. the JDK or Node.js) +* The version of a development tool is specified twice (one in the Dockerfile and one in the CI/CD system) + +Here is the Codefresh pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - compile + - build +steps: + main_clone: + title: Cloning main repository... 
+ stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + run_unit_tests: + title: Compile/Unit test + stage: compile + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'non-multi-stage' + dockerfile: Dockerfile.only-package +{% endraw %} +{% endhighlight %} + +This pipeline has an intermediate [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that runs a specific version of Maven/JDK to create the JAR file. The JAR file is then available to the next step via [the Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). + +{% include image.html +lightbox="true" +file="/images/guides/build-docker-images/package-only-pipeline.png" +url="/images/guides/build-docker-images/package-only-pipeline.png" +alt="Package only Docker builds" +caption="Package only Docker builds" +max-width="100%" +%} + +In the example above, you can see that the version of JDK/JRE is mentioned twice (one in the pipeline and one in the Dockerfile). If developers decide to upgrade to Java 11, they need to change both places (and in big companies pipelines are usually managed by operators). If this was a multistage build then a developer could simply change just the Dockerfile and be certain that the pipeline is "upgraded" as well. + +We find that similar workflows are from legacy CI solutions that are VM-based. Codefresh is a container-native solution, so if you have the opportunity you should create your pipelines from scratch when switching to Docker-based pipelines. + + +## Avoiding non-standard Dockerfiles + +We already established in the previous section that Dockerfiles should be self-contained. Another best practice is to make sure that all actions inside a Dockerfile are idempotent. + +There are several Dockerfiles that attempt to mimic a CI/CD system and perform non-standard actions such as: + +* Performing Git commits or other Git actions +* Cleaning up or tampering with database data +* Calling other external services with POST/PUT operations + +Not only does this make the pipeline much more complex (because retrying the pipeline now has consequences), but you also need to pass special credentials in the Dockerfile itself via the pipeline, making the pipeline even more complicated. + +You should avoid these kinds of directives inside a Dockerfile and simplify it so that all actions inside it are repeatable and non-destructive. +A Dockerfile should mainly: + +* Clone extra source code (if needed) +* Download dependencies +* Compile/package code +* Process/Minify/Transform local resources +* Run scripts and edit files on the container filesystem only + +As an example **TO AVOID**, this Dockerfile is also trying to run a SonarQube analysis + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM newtmitch/sonar-scanner AS sonar +COPY src src +RUN sonar-scanner +FROM node:11 AS build +WORKDIR /usr/src/app +COPY . . 
+RUN yarn install \ + yarn run lint \ + yarn run build \ + yarn run generate-docs +{% endraw %} +{% endhighlight %} + +This Dockerfile has the following issues: + +* It can run only where a SonarQube installation is available +* It needs extra credentials for the SonarQube instance +* If the SonarQube installation has issues, then the application build will also fail + +The proper way to build this Dockerfile is to make it package just the application: + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM node:11 AS build +WORKDIR /usr/src/app +COPY . . +RUN yarn install \ + yarn run lint \ + yarn run build \ + yarn run generate-docs +{% endraw %} +{% endhighlight %} + +And then move the SonarQube part to the actual pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - sonar + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'my-github-repo/my-node-app' + revision: master + run_sonarqube: + title: Run SonarQube Analysis + stage: sonar + image: 'newtmitch/sonar-scanner' + environment: + - SONAR_TOKEN=my-sonar-token + commands: + - cd src + - sonar-scanner + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: my-node-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This makes the Docker build step as simple as possible. + +For more Docker best practices see our [Docker anti-patterns blog post](https://codefresh.io/containers/docker-anti-patterns/){:target="\_blank"}. + +## Pushing Docker images + +The build step in Codefresh is very smart and automatically also pushes your Docker image to your [default Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/#the-default-registry). + + +{% include image.html +lightbox="true" +file="/images/guides/build-docker-images/automatic-docker-push.png" +url="/images/guides/build-docker-images/automatic-docker-push.png" +alt="Automatic Docker push" +caption="Automatic Docker push" +max-width="80%" +%} + +Thus, if you run any of the above pipelines you can see the image created in the Docker image dashboard. + + +{% include image.html +lightbox="true" +file="/images/guides/build-docker-images/docker-image-dashboard.png" +url="/images/guides/build-docker-images/docker-image-dashboard.png" +alt="Docker image dashboard" +caption="Docker image dashboard" +max-width="80%" +%} + +For more details on how to push Docker images see the [working with Docker registries page]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/). + +## Running Docker images + +You can run Docker images inside a Codefresh pipeline using freestyle steps. You can use the freestyle step to run either an existing image from a private or public registry or even a Docker image that was created in the pipeline itself. + +This is a [very common pattern in Codefresh]({{site.baseurl}}/docs/pipelines/steps/freestyle/#dynamic-freestyle-steps) and works by simply mentioning the name of the build step that created the image. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... 
+ stage: prepare + type: git-clone + repo: 'my-github-repo/my-helper-project' + revision: master + my_testing_tools: + title: Building Docker Image + type: build + image_name: my-own-testing-framework + run_tests: + title: Running Unit tests + image: ${{my_testing_tools}} + commands: + - ./my-unit-tests.sh +{% endraw %} +{% endhighlight %} + +For more details see [dynamic build tools]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#creating-docker-images-dynamically-as-build-tools), and [context variables]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables) + + + +## Related articles +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) + + + + + + diff --git a/_docs/ci-cd-guides/environment-deployments.md b/_docs/ci-cd-guides/environment-deployments.md new file mode 100644 index 000000000..bc845d97d --- /dev/null +++ b/_docs/ci-cd-guides/environment-deployments.md @@ -0,0 +1,690 @@ +--- +title: "Deploying to predefined environments" +description: "Deploy to different production and staging environments from Codefresh pipelines" +group: ci-cd-guides +toc: true +--- + +With Codefresh, you can deploy a single application to multiple environments, such as, qa, staging, prod, and manage all of them with single or multiple pipelines. +This guide describes how an example application can be deployed with different configurations and various workflows for handling environment deployment. + +{% include image.html +lightbox="true" +file="/images/guides/promotion/image-promotion.png" +url="/images/guides/promotion/image-promotion.png" +alt="Using multiple environments" +caption="Using multiple environments" +max-width="80%" +%} + +## Prerequisites + +Before starting, you will need to: + + 1. [Create a Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) + 1. Get access to a Kubernetes cluster on any cloud provider + 1. [Connect the Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) to your account + 1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/){:target="\_blank"} and [helm](https://helm.sh/docs/intro/install/):target="\_blank"} and point them to your cluster + 1. Have [Docker](https://docs.docker.com/get-docker/){:target="\_blank"} installed locally (optional) + +## Example application + +As a running example, we will use a simple application with a Helm chart. [Helm is the package manager]({{site.baseurl}}/docs/deployments/helm/helm-best-practices/) for Kubernetes and has built-in support for passing different configuration settings for each environment. + +You can find the example Helm application at [https://github.com/codefresh-contrib/helm-promotion-sample-app](https://github.com/codefresh-contrib/helm-promotion-sample-app){:target="\_blank"}. If you want to follow along feel free to fork it on your own account. + +The application is a web page that prints out its own configuration as loaded from `/config/settings.ini`. +You can run the application locally on your own workstation with: + +``` +git clone https://github.com/codefresh-contrib/helm-promotion-sample-app.git +cd helm-promotion-sample-app +docker build . 
-t my-app
+docker run -p 8080:8080 my-app
+```
+
+and then visit `http://localhost:8080` in your browser.
+
+In this example, we use a settings file in the [INI format](https://en.wikipedia.org/wiki/INI_file){:target="\_blank"}, but the same principles apply to other configuration methods, such as env files, Java properties, or YAML/JSON configurations.
+
+### Different environment configurations
+
+The application includes a [Helm chart](https://github.com/codefresh-contrib/helm-promotion-sample-app/tree/master/chart/sample-app){:target="\_blank"} that contains values for three different environments:
+
+* [values-qa.yaml](https://github.com/codefresh-contrib/helm-promotion-sample-app/blob/master/chart/values-qa.yaml){:target="\_blank"} for the "QA" environment
+* [values-staging.yaml](https://github.com/codefresh-contrib/helm-promotion-sample-app/blob/master/chart/values-staging.yaml){:target="\_blank"} for the "Staging" environment
+* [values-prod.yaml](https://github.com/codefresh-contrib/helm-promotion-sample-app/blob/master/chart/values-prod.yaml){:target="\_blank"} for the "Production" environment
+
+The values contained in the files apply both to the application (e.g. the payment service URL) and to the infrastructure level (the number of replicas inside the cluster).
+Note that the values for the application are dummy values that are not actually used by the application (they are simply shown in the web page). The number of replicas takes real effect on the cluster (the production configuration defines 2 replicas instead of 1).
+
+>For simplicity, the chart of the application is hosted in the same Git repository as the source code. As an alternative, you could also have a second Git repository with just the chart. Codefresh supports both ways.
+
+### Manual deployment to different environments
+
+First let's run the application manually in all three environments. Later we will automate the whole process with Codefresh pipelines. We will create each environment as a namespace in the cluster:
+
+```
+kubectl create namespace qa
+kubectl create namespace staging
+kubectl create namespace production
+```
+
+Then we will install a copy of the application in each environment with the respective values:
+
+```
+git clone https://github.com/codefresh-contrib/helm-promotion-sample-app.git
+cd helm-promotion-sample-app/chart
+helm install example-qa sample-app -n qa -f values-qa.yaml
+helm install example-staging sample-app -n staging -f values-staging.yaml
+helm install example-prod sample-app -n production -f values-prod.yaml
+```
+
+At this point all three copies of the application should be up. You might need to wait some time until all the load balancers are up. You can see the running URLs with:
+
+```
+kubectl get service -A
+```
+
+If you visit the URL of each service in your browser, you will see how the application looks in each environment.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/promotion/different-settings.png"
+url="/images/guides/promotion/different-settings.png"
+alt="Settings per environment"
+caption="Settings per environment"
+max-width="50%"
+%}
+
+Note that the application uses a [Load Balancer](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/){:target="\_blank"} and this means extra costs on your cloud provider.
When you are ready to clean up the application, run the following:
+
+```
+helm uninstall example-staging -n staging
+helm uninstall example-prod -n production
+helm uninstall example-qa -n qa
+```
+
+Note that for this guide, all three environments run on the same cluster. In a real application, you should use a separate cluster for production, and never mix production and non-production workloads. Also notice that the chart refers to the `latest` tag of the application container, which is **NOT** a recommended practice. In a real application the chart should specify a specific tag that is versioned.
+
+## Basic deployment pipeline for different environments
+
+Now that we have seen how manual deployment works, let's automate the whole process with Codefresh. We [will create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) that:
+
+1. Deploys all commits to the `master` branch in the production environment
+1. Deploys all other commits to the staging environment
+
+Here is what a commit to master looks like:
+
+{% include image.html
+lightbox="true"
+file="/images/guides/promotion/production-deployment.png"
+url="/images/guides/promotion/production-deployment.png"
+alt="Production deployment"
+caption="Production deployment"
+max-width="80%"
+%}
+
+This is a very simple workflow, perfect for small teams that follow Continuous Deployment. You can use the same pattern in other workflows such as [trunk based development]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#trunk-based-development).
+
+The pipeline has the following steps:
+
+1. A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to get the source code plus the Helm chart
+1. A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create and push the container image to Docker Hub
+1. A [Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) to perform the deployment. The step has [pipeline conditions]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) to select which environment will be used.
+ +Here is the full pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "deployment" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/helm-promotion-sample-app" + revision: '${{CF_REVISION}}' + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/helm-promotion-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + deployStaging: + title: Deploying to Staging + type: helm + stage: deployment + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-staging + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: staging + custom_value_files: + - ./chart/values-staging.yaml + when: + branch: + ignore: + - master + deployProd: + title: Deploying to Production + type: helm + stage: deployment + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-prod + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: production + custom_value_files: + - ./chart/values-prod.yaml + when: + branch: + only: + - master +{% endraw %} +{% endhighlight %} + +To test the pipeline and see how it behaves with different environments: + +1. Fork the [Git repository](https://github.com/codefresh-contrib/helm-promotion-sample-app){:target="\_blank"} to your own GitHub account +1. Commit a dummy change in the `master` branch, and you will see a deployment to the production namespace +1. Commit a dummy change to the `staging` branch or any other branch of your choosing, and you will see a deployment to the staging namespace. + +Here is how the pipeline looks when a commit happens to a branch that is not `master`: + +{% include image.html +lightbox="true" +file="/images/guides/promotion/non-production-deployment.png" +url="/images/guides/promotion/non-production-deployment.png" +alt="Staging deployment" +caption="Staging deployment" +max-width="80%" +%} + +As you can see the step that deploys to production is now skipped, and the step that deploys to staging is enabled. + +This is a great starting point for your own workflows. Codefresh can handle more complicated scenarios as you will see in the later sections. + +>Note that for brevity reasons, the pipeline deploys the Helm chart directly from the Git repo. In an actual pipeline, you [should also store the Helm chart +in a Helm repository]({{site.baseurl}}/docs/ci-cd-guides/helm-best-practices/#packagepush-and-then-deploy). + +For more details on Helm deployments see our [dedicated Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/). + +## Viewing your Helm Releases + +The previous pipeline works great as an automation mechanism. Wouldn't it be great if you could also *visualize* your deployments? +Codefresh includes a [Helm release dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) to help you understand your deployments. + +1. In the Codefresh UI, from the Ops section in the sidebar, select [Helm Releases](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. 
+ +{% include image.html +lightbox="true" +file="/images/guides/promotion/helm-releases.png" +url="/images/guides/promotion/helm-releases.png" +alt="Helm releases" +caption="Helm releases" +max-width="80%" +%} + +{:start="2"} +1. To get extra information such as the services exposed and active replicas for a release, click on the release. + +{% include image.html +lightbox="true" +file="/images/guides/promotion/services.png" +url="/images/guides/promotion/services.png" +alt="Helm service information" +caption="Helm service information" +max-width="80%" +%} + + In the History tab, you can view the deployment history, and even [rollback]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/#rolling-back-a-helm-release) to a previous release: + +{% include image.html +lightbox="true" +file="/images/guides/promotion/history.png" +url="/images/guides/promotion/history.png" +alt="Helm deployment history" +caption="Helm deployment history" +max-width="80%" +%} + + And most importantly in the Values tab, the values applied for each release. + This way you can also verify that the correct values are applied to the respective environment. + +{% include image.html +lightbox="true" +file="/images/guides/promotion/helm-values.png" +url="/images/guides/promotion/helm-values.png" +alt="Helm values used" +caption="Helm values used" +max-width="80%" +%} + + + + +## Using the Environment dashboard +Codefresh also includes an optional [environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) that you can use to track down your environments and their current status. The dashboard is especially helpful if you have a large number of environments. + +{% include +image.html +lightbox="true" +file="/images/guides/environments/environments.png" +url="/images/guides/environments/environments.png" +alt="Codefresh Environment Dashboard" +caption="Codefresh Environment Dashboard" +max-width="70%" +%} + + +To activate your environment dashboard you need to add an [env block]({{site.baseurl}}/docs/pipelines/deployment-environments/) to each of the deployment steps in the pipeline. 
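+
+For reference, this is the `env` fragment added to the staging deployment step; the production step gets an equivalent block with its own name, URL, and release name. The full pipeline below shows it in context:
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+    env:
+      # Name shown on the environment dashboard
+      name: Acme Staging
+      endpoints:
+        - name: app
+          url: https://staging.example.com
+      type: helm-release
+      # Annotate the environment with the commit message of this build
+      change: ${{CF_COMMIT_MESSAGE}}
+      filters:
+        - cluster: 'mydemoAkscluster@BizSpark Plus'
+          releaseName: example-staging
+{% endraw %}
+{% endhighlight %}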
+Here is the whole pipeline: + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "deployment" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/helm-promotion-sample-app" + revision: '${{CF_REVISION}}' + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/helm-promotion-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + deployStaging: + title: Deploying to Staging + type: helm + stage: deployment + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-staging + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: staging + custom_value_files: + - ./chart/values-staging.yaml + when: + branch: + ignore: + - master + env: + name: Acme Staging + endpoints: + - name: app + url: https://staging.example.com + type: helm-release + change: ${{CF_COMMIT_MESSAGE}} + filters: + - cluster: 'mydemoAkscluster@BizSpark Plus' + releaseName: example-staging + deployProd: + title: Deploying to Production + type: helm + stage: deployment + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-prod + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: production + custom_value_files: + - ./chart/values-prod.yaml + when: + branch: + only: + - master + env: + name: Acme Production + endpoints: + - name: app + url: https://production.example.com + type: helm-release + change: ${{CF_COMMIT_MESSAGE}} + filters: + - cluster: 'mydemoAkscluster@BizSpark Plus' + releaseName: example-prod +{% endraw %} +{% endhighlight %} + + +Notice that we use the `CF_COMMIT_MESSAGE` [variable]({{site.baseurl}}/docs/pipelines/variables/) to annotate each environment with each build message. After you deploy at least once to each environment, you should see the following in your [Environment dashboard](https://g.codefresh.io/environments). + +{% include image.html +lightbox="true" +file="/images/guides/promotion/deployment-dashboard.png" +url="/images/guides/promotion/deployment-dashboard.png" +alt="Environment inspection" +caption="Environment inspection" +max-width="80%" +%} + +Just by looking at the builds of each environment, it is clear that the staging environment is one commit ahead (for feature 4689). +Clicking an environment shows several details such as active services, deployment history, rollback options, manifests rendered etc as in the Helm releases page. + +## Using Approvals in a pipeline + +Deploying straight to production after a commit is a worthy goal, but not all organizations want to work like this. In several cases, a human must approve a production deployment with a manual step. + +An alternative pipeline pattern is to have a single pipeline that automatically deploys to the "staging" environment but pauses before releasing to production. 
+ +{% include image.html +lightbox="true" +file="/images/guides/promotion/with-approval.png" +url="/images/guides/promotion/with-approval.png" +alt="Asking for approval before a production deployment" +caption="Asking for approval before a production deployment" +max-width="80%" +%} + +Once the pipeline is paused, all project stakeholders can examine the state of the application in the staging environment (either manually or by running automated tests), and if everything looks good, promote the application to production. + +This is easily accomplished through the [Codefresh approval step]({{site.baseurl}}/docs/pipelines/steps/approval/). The pipeline is stopped, and a yes/no button is shown in the UI. The pipeline can continue only if approved by selecting `yes`. + +Here is the whole pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "staging" + - "prod" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/helm-promotion-sample-app" + revision: '${{CF_REVISION}}' + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/helm-promotion-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + deployStaging: + title: Deploying to Staging + type: helm + stage: staging + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-staging + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: staging + custom_value_files: + - ./chart/values-staging.yaml + askForPermission: + type: pending-approval + stage: prod + title: Deploy to production? + deployProd: + title: Deploying to Production + type: helm + stage: prod + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-prod + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: production + custom_value_files: + - ./chart/values-prod.yaml +{% endraw %} +{% endhighlight %} + +The approval step has many more options such as a timeout or even choosing a different flow in the pipeline if the approval is declined. + +## Using multiple pipelines for deployments + +Having a single pipeline that deals with all deployment environments can work great with a small team. As an organization grows, and more steps are added to the pipeline, it becomes very hard to use conditions to enable/disable specific steps in pipelines. + +With Codefresh, you can create as many pipelines as you want for a single project. It is therefore very easy to employ different simple pipelines for specific purposes, instead of working with a complex monolithic pipeline. + +In our example we will create two pipelines: + +1. The "staging" pipeline performs linting and security scans in the source code before creating the Docker image +1. 
The "production" pipeline runs integration tests *after* the creation of the Docker image + +Here is how the staging pipeline looks: + +{% include image.html +lightbox="true" +file="/images/guides/promotion/staging-pipeline.png" +url="/images/guides/promotion/staging-pipeline.png" +alt="A pipeline only for staging deployments" +caption="A pipeline only for staging deployments" +max-width="80%" +%} + +This pipeline uses [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/#inserting-parallel-steps-in-a-sequential-pipeline) to run linting and security scanning at the same time. + +Here is the whole pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "validate" + - "build" + - "staging" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/helm-promotion-sample-app" + revision: '${{CF_REVISION}}' + stage: "clone" + prechecks: + type: parallel + stage: 'validate' + steps: + lint: + title: Lint + working_directory: "${{clone}}" + image: golangci/golangci-lint:v1.33.0 + commands: + - golangci-lint run -v . + securityAnalysis: + title: Security Scan + working_directory: "${{clone}}" + image: 'securego/gosec:v2.5.0' + commands: + - gosec ./... + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/helm-promotion-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + + deployStaging: + title: Deploying to Staging + type: helm + stage: staging + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-staging + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: staging + custom_value_files: + - ./chart/values-staging.yaml +{% endraw %} +{% endhighlight %} + +The production pipeline assumes that the code has been scanned/validated already, and runs some integration tests as a final validation check before deploying the release to production: + +{% include image.html +lightbox="true" +file="/images/guides/promotion/production-pipeline.png" +url="/images/guides/promotion/production-pipeline.png" +alt="A pipeline only for production deployments" +caption="A pipeline only for production deployments" +max-width="80%" +%} + +This pipeline uses [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) to run [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). 
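+
+The key detail is the `services` block attached to the test step: it launches the image produced by the `build_app_image` step of this same pipeline as a service container named `app`, which the test command can then call over HTTP. The relevant fragment (shown in context in the full pipeline below) is:
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  myTests:
+    title: Integration Tests
+    type: freestyle
+    arguments:
+      image: 'byrnedo/alpine-curl'
+      commands:
+        # The hostname "app" resolves to the service container defined below
+        - "curl http://app:8080/health"
+    services:
+      composition:
+        app:
+          # Dynamic reference to the image built earlier in this pipeline
+          image: '${{build_app_image}}'
+          ports:
+            - 8080
+{% endraw %}
+{% endhighlight %}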
+ +Here is the whole pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "testing" + - "prod" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/helm-promotion-sample-app" + revision: '${{CF_REVISION}}' + stage: "clone" + build_app_image: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/helm-promotion-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + myTests: + title: Integration Tests + type: freestyle + working_directory: "${{clone}}" + stage: "testing" + arguments: + image: 'byrnedo/alpine-curl' + commands: + - "curl http://app:8080/health" + services: + composition: + app: + image: '${{build_app_image}}' + ports: + - 8080 + deployProd: + title: Deploying to Production + type: helm + stage: prod + working_directory: ./helm-promotion-sample-app + arguments: + action: install + chart_name: ./chart/sample-app + release_name: example-prod + helm_version: 3.0.2 + kube_context: 'mydemoAkscluster@BizSpark Plus' + namespace: production + custom_value_files: + - ./chart/values-prod.yaml +{% endraw %} +{% endhighlight %} + +Now that you have created the pipelines, you have several options on how to trigger them. +Some common workflows are: + +1. Automate the staging pipeline when a commit lands in `master`, and only launch the production pipeline manually. +1. Automate the staging pipeline when a commit lands in `master`, and use an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to call the production pipeline as a [child pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/call-child-pipelines/). +1. Set the [trigger]({{site.baseurl}}/docs/pipeline/triggers/git-triggers/) of the production pipeline to [launch only]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#restricting-which-branches-to-build) on `master`, and the trigger of the staging pipeline to launch only for `non-master` branches. +1. Set the production pipeline to launch only for commits on `master`, and the staging pipeline only for pull requests (PRs). + +The exact mechanism depends on the workflow of your team. For more information, see [the guide on branches and pull requests]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/), especially [trunk based development]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#trunk-based-development), as a good starting point. + +## Promoting releases between environments + +If you have a large number of environments, we also suggest looking at the Helm promotion board provided by Codefresh. +For more details, see [Helm promotion board]({{site.baseurl}}/docs/new-helm/helm-environment-promotion/). 
+
+
+{% include
+image.html
+lightbox="true"
+file="/images/guides/environments/board.png"
+url="/images/guides/environments/board.png"
+alt="Helm Promotion Dashboard"
+caption="Helm Promotion Dashboard"
+max-width="80%"
+%}
+
+
+## Related articles
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Pull requests and branches]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/)
+[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/)
+
+
+
+
+
diff --git a/_docs/ci-cd-guides/first-pipeline.md b/_docs/ci-cd-guides/first-pipeline.md
deleted file mode 100644
index 83c252ef6..000000000
--- a/_docs/ci-cd-guides/first-pipeline.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: "Create your first pipeline"
-description: ""
-group: ci-cd-guides
-toc: true
----
-
-Coming soon
diff --git a/_docs/ci-cd-guides/gitops-deployments.md b/_docs/ci-cd-guides/gitops-deployments.md
new file mode 100644
index 000000000..14f5ad199
--- /dev/null
+++ b/_docs/ci-cd-guides/gitops-deployments.md
@@ -0,0 +1,687 @@
+---
+title: "GitOps deployments"
+description: "Deploy with Codefresh and ArgoCD"
+group: ci-cd-guides
+toc: true
+---
+
+Apart from traditional push-based Helm deployments, Codefresh can also be used for [GitOps deployments](https://codefresh.io/gitops/).
+
+## What is GitOps
+
+GitOps is the practice of performing operations via Git only. The main principles of GitOps are the following:
+
+* The state of the system/application is always stored in Git.
+* Git is always the source of truth for what happens in the system.
+* If you want to change the state of the system, you need to perform a Git operation such as creating a commit or opening a pull request. Deployments, tests, and rollbacks are all controlled through the Git flow.
+* Once the Git state is changed, then the cluster (or whatever your deployment target is) state should match what is described in the Git repository.
+* No hand-rolled deployments, no ad-hoc cluster changes, no live configuration changes are allowed. If a change needs to happen, it must be committed to Git first.
+
+GitOps deployments have several advantages compared to traditional imperative deployments. The main one is that the Git repo represents the state of the system, and Git history
+is essentially the same thing as deployment history. Rollbacks are very easy to perform by simply using a previous Git hash.
+
+Even though GitOps is not specific to Kubernetes, current GitOps tools work great with Kubernetes in the form of cluster controllers. The GitOps controller monitors the state of the Git repository, and when a commit happens, the cluster is instructed to match the same state.
+
+Codefresh has native support for GitOps, including a graphical dashboard for handling your GitOps deployments:
+
+{% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/gitops-dashboard.png"
+  url="/images/guides/gitops/gitops-dashboard.png"
+  alt="The GitOps dashboard"
+  caption="The GitOps dashboard"
+  max-width="100%"
+  %}
+
+This guide will explain how you can use GitOps for your own applications.
+
+## Setting up your Git Repositories
+
+One of the central ideas around GitOps is the usage of Git for ALL project resources. Even though developers are familiar with using Git for the source code of the application, adopting GitOps means that you need to store in Git every other resource of the application (and not just the source code).
+
+In the case of Kubernetes, this means that all Kubernetes manifests should be stored in a Git repository as well. In the simplest scenario you have the main repository of your application (this is mostly interesting to developers) and [a second Git repository with Kubernetes manifests](https://argoproj.github.io/argo-cd/user-guide/best_practices/#separating-config-vs-source-code-repositories) (this is more relevant to operators/SREs).
+
+As a running example you can use:
+
+* The [https://github.com/codefresh-contrib/gitops-app-source-code](https://github.com/codefresh-contrib/gitops-app-source-code) repository for the application code
+* The [https://github.com/codefresh-contrib/gitops-kubernetes-configuration](https://github.com/codefresh-contrib/gitops-kubernetes-configuration) repository for the Kubernetes configuration
+* The [https://github.com/codefresh-contrib/gitops-pipelines](https://github.com/codefresh-contrib/gitops-pipelines) repository that holds the pipelines
+
+The application code repository contains the source code plus a Dockerfile. You can use any Git workflow for this repository. We will set up a pipeline in Codefresh that creates a container image on each commit.
+
+The configuration repository holds the Kubernetes manifests. This is one of the critical points of GitOps:
+
+* The configuration repository holds the manifests that are also present in the Kubernetes cluster
+* Every time a commit happens to the configuration repository, the cluster is notified to deploy the new version of the files (we will set up a pipeline for this)
+* Every subsequent configuration change should become a Git commit. Ad-hoc changes to the cluster (e.g. with `kubectl` commands) are **NOT** allowed
+
+We also have a third Git repository for pipelines, because pipelines are also part of the application.
+
+Before continuing, fork all three repositories into your own GitHub account if you don't already have your own example application.
+
+## Connecting ArgoCD and Codefresh
+
+GitOps deployments are powered by [ArgoCD](https://argoproj.github.io/argo-cd/), so you need an active ArgoCD installation in your cluster to take advantage of the GitOps dashboard in Codefresh.
+
+Follow the instructions for [connecting ArgoCD to Codefresh]({{site.baseurl}}/docs/integrations/argocd/) and creating an ArgoCD application:
+
+{% include image.html
+  lightbox="true"
+  file="/images/integrations/argocd/argocd-provision-app.png"
+  url="/images/integrations/argocd/argocd-provision-app.png"
+  alt="Creating a new ArgoCD application in a Codefresh environment"
+  caption="Creating a new ArgoCD application in a Codefresh environment"
+  max-width="40%"
+  %}
+
+The options are:
+
+* Name - User-defined name of the Codefresh environment dashboard
+* Project - A way to [group/secure applications](https://argoproj.github.io/argo-cd/user-guide/projects/). Choose default if you have only one project in ArgoCD.
+* Application - Name of the application
+* Manual/automatic sync - If automatic, a deployment takes place automatically whenever a Git commit happens.
+* Use schema - Kubernetes manifests are checked for correctness before being deployed to the cluster
+* source repository - Git repository that holds your Kubernetes manifests
+* revision - Revision to be checked out when a deployment happens
+* path - folder inside the Git repository that should be searched for manifests (if your Git repo has multiple applications). Use `./` if all your manifests are in the root folder.
+* cluster - Kubernetes cluster when deployment will take place +* namespace - Kubernetes namespace where the application will be deployed to +* directory recurse - whether to check all folders in the Git repository for manifests in a recursive way. + +For a sample application you can use the [https://github.com/codefresh-contrib/gitops-kubernetes-configuration](https://github.com/codefresh-contrib/gitops-kubernetes-configuration) repository. Fork the project in your own GitHub account and use that link in the *Source repository* section. + +Once you connect your application you will see it under in the GitOps application screen in the Codefresh UI. + +## Creating a basic CI Pipeline for GitOps + +Creating a CI pipeline for GitOps is no different than a [standard pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/) that [packages your Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/), runs [tests]({{site.baseurl}}/docs/testing/unit-tests/), performs [security scans]({{site.baseurl}}/docs/testing/security-scanning/) etc. + + {% include image.html + lightbox="true" + file="/images/guides/gitops/basic-ci-pipeline.png" + url="/images/guides/gitops/basic-ci-pipeline.png" + alt="Basic CI pipeline" + caption="Basic CI pipeline" + max-width="100%" + %} + +To take advantage of the GitOps dashboard facilities you also need to setup the correlation between the Docker image and the Pull Requests/issues associated with it. This correlation happens via [annotations]({{site.baseurl}}/docs/codefresh-yaml/annotations/). The easiest way to annotate your image is by using the [pipeline plugins](https://codefresh.io/steps/) offered by Codefresh for this purpose. Currently we offer the following plugins: + +* [Record Pull Request information](https://codefresh.io/steps/step/image-enricher) +* [Record Jira Issue information](https://codefresh.io/steps/step/jira-issue-extractor) + +Here is an example Pipeline definition: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "metadata" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "my-github-username/gitops-app-source-code" + revision: '${{CF_REVISION}}' + stage: "clone" + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/simple-web-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + enrich-image: + title: Add PR info + type: image-enricher + stage: "metadata" + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:latest + BRANCH: '${{CF_BRANCH}}' + REPO: 'kostis-codefresh/simple-web-app' + GIT_PROVIDER_NAME: github-1 + jira-issue-extractor: + title: Enrich image with jira issues + type: jira-issue-extractor + stage: "metadata" + fail_fast: false + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:latest + JIRA_PROJECT_PREFIX: 'SAAS' + MESSAGE: SAAS-8431 + JIRA_HOST: codefresh-io.atlassian.net + JIRA_EMAIL: kostis@codefresh.io + JIRA_API_TOKEN: '${{JIRA_TOKEN}}' +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. Checks out the source code of an application with the [git-clone step]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) +1. [Builds]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) a docker image +1. Annotates the Docker image with the Pull Request information provided by Github +1. 
Annotates the Docker image with a specific Jira issue ticket + +You can see the associated metadata in your [Docker image dashboard](https://g.codefresh.io/images/) + + {% include image.html + lightbox="true" + file="/images/guides/gitops/image-annotations.png" + url="/images/guides/gitops/image-annotations.png" + alt="Enriched Docker image" + caption="Enriched Docker image" + max-width="80%" + %} + +Codefresh is using this information to fill the deployment history in the GitOps dashboard. + +## Creating a basic CD Pipeline for GitOps + +To create a CD pipeline in Codefresh that is responsible for GitOps deployments you must first disable the auto-sync behavior of ArgoCD. You can disable auto-sync either from the GUI or via the [command line](https://argoproj.github.io/argo-cd/user-guide/auto_sync/): + + {% include image.html + lightbox="true" + file="/images/guides/gitops/disable-auto-sync.png" + url="/images/guides/gitops/disable-auto-sync.png" + alt="Basic CD pipeline" + caption="Basic CD pipeline" + max-width="80%" + %} + + With the auto-sync behavior disabled, all Git pushes that happen on the GitOps repo will be ignored by ArgoCD (however ArgoCD will still mark your application as out-of-sync). + + You can now [create a new pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/) in Codefresh using a [standard Git trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) that will monitor the GitOps repository for updates. This way Codefresh is responsible for the GitOps process instead of Argo. + + {% include image.html + lightbox="true" + file="/images/guides/gitops/argo-sync-pipeline.png" + url="/images/guides/gitops/argo-sync-pipeline.png" + alt="Basic CD pipeline" + caption="Basic CD pipeline" + max-width="80%" + %} + +The big advantage here is that you can construct a full pipeline over the sync process with multiple steps before or after the sync. For example you could run some smoke tests after the deployment takes place. Here is an example pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "pre sync" + - "sync app" + - "post sync" + +steps: + pre_sync: + title: "Pre sync commands" + type: "freestyle" # Run any command + image: "alpine:3.9" # The image in which command will be executed + commands: + - echo "Sending a metrics marker" + stage: "pre sync" + sync_and_wait: + title: Sync ArgoCD app and wait + type: argocd-sync + arguments: + context: "argo-cd" + app_name: "${{ARGOCD_APP_NAME}}" + wait_healthy: true + stage: "sync app" + post_sync: + title: "Post sync commands" + type: "freestyle" # Run any command + image: "alpine:3.9" # The image in which command will be executed + commands: + - echo "running smoke tests" + stage: "post sync" +{% endraw %} +{% endhighlight %} + +The pipeline is using the [argo-sync plugin](https://codefresh.io/steps/step/argocd-sync) that can be used by Codefresh to start the sync process of an application from the Git repo to the cluster. + +The name of the `context` parameter should be the same name you used for your [ArgoCD integration]({{site.baseurl}}/docs/integrations/argocd/). + + {% include image.html + lightbox="true" + file="/images/guides/gitops/argo-context.png" + url="/images/guides/gitops/argo-context.png" + alt="Using the Argo integration name as a context" + caption="Using the Argo integration name as a context" + max-width="80%" + %} + +The name of the application should be the same name as the ArgoCD Application. 
+
+ {% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/argo-application-name.png"
+  url="/images/guides/gitops/argo-application-name.png"
+  alt="Argo Application name"
+  caption="Argo Application name"
+  max-width="80%"
+  %}
+
+ You can use pipeline variables or any other familiar Codefresh mechanism such as [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/).
+
+ Once the pipeline has finished running, the sync status will be updated in your GitOps dashboard to reflect the current state.
+
+## Working with the GitOps Dashboard
+
+After you create an ArgoCD application, you can click on it in the [GitOps environment overview](https://g.codefresh.io/gitops) and see the respective GitOps screen.
+
+{% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/real-dashboard.png"
+  url="/images/guides/gitops/real-dashboard.png"
+  alt="GitOps Dashboard"
+  caption="GitOps Dashboard"
+  max-width="100%"
+  %}
+
+This dashboard is the central place for monitoring your application and contains the following information:
+
+1. Current health and sync status
+1. Deployment graph that shows successful/failed deployments in the selected time period
+1. Complete history of deployments according to Git hash. For each deployment you can also see which Pull Request was used for the commit, who was the committer, and which Jira issues this pull request solves (provided that the image was built by a Codefresh pipeline)
+1. The Kubernetes services that belong to this application (on the services tab)
+1. Which services and replicas were updated with each deployment.
+
+The deployment status is fetched from your ArgoCD integration in a live manner. If, at any point, the deployment is not synced with Git, you will instantly see the out-of-sync status. You will get the number of resources that are out of sync. When you click the out-of-sync status, you will get a list of all resources in that status.
+
+{% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/out-of-sync.png"
+  url="/images/guides/gitops/out-of-sync.png"
+  alt="Out of sync status"
+  caption="Out of sync status"
+  max-width="60%"
+  %}
+
+For each Git hash Codefresh associates the respective Pull Request and Jira issue(s) that affected the deployment. To achieve this correlation, Codefresh enriches the Docker image(s) of the service during the CI process.
+
+You can manually create these annotations with the [standard Codefresh annotation support]({{site.baseurl}}/docs/codefresh-yaml/annotations/) or via the built-in pipeline steps that we will see in the next section.
+
+You can find helpful tips by hovering your mouse over the PR number, the issue, the Git committer, and so on.
+
+{% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/tooltips.png"
+  url="/images/guides/gitops/tooltips.png"
+  alt="Extra tooltip information"
+  caption="Extra tooltip information"
+  max-width="80%"
+  %}
+
+For each deployment you can also see a before/after view of the pods/replicas that were affected.
+
+{% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/updated-services.png"
+  url="/images/guides/gitops/updated-services.png"
+  alt="Updated services"
+  caption="Updated services"
+  max-width="100%"
+  %}
+
+### Filtering the Deployment History
+
+You can add filters on the deployment history by using the multi-select field on the top left of the screen.
+ +{% include image.html + lightbox="true" + file="/images/guides/gitops/filter.png" + url="/images/guides/gitops/filter.png" + alt="Filtering options" + caption="Filtering options" + max-width="40%" + %} + + You can add filters for: + +* Git committer(s) +* Pull Request number(s) +* Jira issue(s) + + If you define multiple options they work in an OR manner. + +### Searching the Deployment History + +For advanced filtering options, the search field on the top right allows you to view only the subset of deployments that match your custom criteria. + +Apart from direct text search, the text field also supports a simple query language with the following keywords: + +* `issues` +* `issue` +* `prs` +* `pr` +* `committer` +* `committers` +* `service` +* `services` +* `image` +* `images` +* `status` +* `statuses` + +The following characters serve as delimiters + +* `:` define the value for a keyword +* `,` define multiple values for a single keyword +* `;` define multiple criteria + +{% include image.html + lightbox="true" + file="/images/guides/gitops/search.png" + url="/images/guides/gitops/search.png" + alt="Searching deployment history" + caption="Searching deployment history" + max-width="80%" + %} + +Some examples are: + +* `pr:2` - filter the deployment history to show only a specific Pull request +* `issues: SAAS-2111, SAAS-2222` - show only specific issues +* `issue: SAAS-2111; pr:3 ; service: my-app` - searching for multiple criteria in OR behavior + +Using the search field allows you to quickly find a specific Git commit in the history of the application (and even rollback the deployment as explained in the next sections). + +## Current State of Application + +The current state tab shows a hierarchical view of your cluster resource for your application. + +{% include image.html + lightbox="true" + file="/images/guides/gitops/currentstate.png" + url="/images/guides/gitops/currentstate.png" + alt="Current State tab" + caption="Current State tab" + max-width="80%" + %} + +At the top of the screen you have several filters available: + +* Kind - choose a specific type of Kubernetes resource +* Health - status of the resource +* Sync state - GitOps status of the resource +* Free search - search any resource by name + +## Tagging GitOps Application + +1. Navigate to the GitOps dashboard. +2. To the application's right (next to the Health Column), click the three dots to open the More Action Dropdown. +3. Select Add/Edit Tags. +4. Click the +tags to add tags. +5. Alternatively, click the "x" next to the tag to remove it. +6. Click Save. + +## Rolling Back Git Versions + +In the GitOps dashboard you will also see a complete history of all past deployments as recorded in Git. You can select any of the previous versions and rollback your application to the respective version. + + {% include image.html + lightbox="true" + file="/images/guides/gitops/rollback.png" + url="/images/guides/gitops/rollback.png" + alt="Rolling back to a previous version" + caption="Rolling back to a previous version" + max-width="80%" + %} + +The Rollback simply informs the cluster to use a different git hash for the sync process. It doesn't affect your Git repository and ArgoCD will now show your application as out-of-sync (because the last Git commit no longer matches the status of the cluster). + +This rollback behavior is best used as an emergency measure after a failed deployment where you want to bring the cluster back to a previous state in a temporary manner. 
If you wish to keep the current rollback status as a permanent status it is best to use the standard `git reset/revert` commands and change the GitOps repository to its desired state. + +## Gitops ABAC Support For Rollback Action + +1. Go to Account Settings > Permissions > Teams Tab > Gitops. +2. Select the Team. +3. Chose what the Team can do and click apply. +4. Select the tags of the applications and click apply. +5. Click Add Rule when done. + +## Performing Automatic Git Commits + +Usually the Pull Requests that take part in a GitOps workflow are created and approved in a manual way (after code review). You have the option however to fully automate the whole process and rather than opening a Pull Request on both the application repository and the manifest repository, commit automatically the manifest changes inside the pipeline that creates the artifact. + +{% include image.html + lightbox="true" + file="/images/guides/gitops/gitops-workflow.png" + url="/images/guides/gitops/gitops-workflow.png" + alt="Full GitOps workflow" + caption="Full GitOps workflow" + max-width="100%" + %} + +Here is an example pipeline that creates a Docker image and also commits a version change in the Kubernetes manifest to denote the new Docker tag of the application: + +{% include image.html + lightbox="true" + file="/images/guides/gitops/ci-cd-pipeline.png" + url="/images/guides/gitops/ci-cd-pipeline.png" + alt="Pipeline that commits manifests" + caption="Pipeline that commits manifests" + max-width="80%" + %} + +There are many ways to change a Kubernetes manifest in a programmatic way, and for brevity reasons we use the [yq](https://github.com/mikefarah/yq) command line tool. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "metadata" + - "gitops" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "my-github-username//gitops-app-source-code" + revision: '${{CF_REVISION}}' + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/simple-web-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + enrich-image: + title: Add PR info + type: image-enricher + stage: "metadata" + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}} + BRANCH: '${{CF_BRANCH}}' + REPO: 'kostis-codefresh/simple-web-app' + GIT_PROVIDER_NAME: github-1 + jira-issue-extractor: + title: Enrich image with jira issues + type: jira-issue-extractor + stage: "metadata" + fail_fast: false + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}} + JIRA_PROJECT_PREFIX: 'SAAS' + MESSAGE: SAAS-8842 + JIRA_HOST: codefresh-io.atlassian.net + JIRA_EMAIL: kostis@codefresh.io + JIRA_API_TOKEN: '${{JIRA_TOKEN}}' + clone_gitops: + title: cloning gitops repo + type: git-clone + arguments: + repo: 'my-github-username//gitops-kubernetes-configuration' + revision: 'master' + stage: "gitops" + when: + branch: + only: + - master + change_manifest: + title: "Update k8s manifest" + image: "mikefarah/yq:3" # The image in which command will be executed + commands: + - yq w -i deployment.yml spec.template.spec.containers[0].image docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}} + - cat deployment.yml + working_directory: "${{clone_gitops}}" + stage: "gitops" + when: + branch: + only: + - master + commit_and_push: + title: Commit manifest + type: git-commit + 
stage: "gitops" + arguments: + repo: 'my-github-username//gitops-kubernetes-configuration' + git: github-1 + working_directory: '/codefresh/volume/gitops-kubernetes-configuration' + commit_message: Updated manifest + git_user_name: ${{CF_COMMIT_AUTHOR}} + git_user_email: ${{CF_COMMIT_AUTHOR}}@acme-inc.com + when: + branch: + only: + - master +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. Checks out the Git repository that contains the source code +1. Builds a Docker image and tags it with the Git hash +1. Enriches the image with the Pull request and ticket information as explained in the previous sections +1. Checks out the Git repository that contains the Kubernetes manifests +1. Performs a text replacement on the manifest updating the `containers` segment with the new Docker image +1. Commits the change back using the [Git commit plugin](https://codefresh.io/steps/step/git-commit) to the Git repository that contains the manifests. + +The CD pipeline (described in the previous section) will detect that commit and use the [sync plugin](https://codefresh.io/steps/step/argocd-sync) to instruct ArgoCD to deploy the new tag. Alternatively you can setup the ArgoCD project to auto-sync on its own if it detects changes in the Git repository with the manifests. + +## Using the App-of-Apps pattern + +The GitOps dashboard has native support for the [app-of-apps pattern](https://argo-cd.readthedocs.io/en/stable/operator-manual/cluster-bootstrapping/). If you have a number of applications that are related and you always +install them as a set in your cluster you can group them in a single Application. The parent application can be defined using [declarative Argo Resources](https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/). + +As an example, you might find that you always install in your cluster Linkerd, Prometheus and Ambassador. You can group all of them in a single Application and deploy them all at once. + +You can find an existing example of app-of-apps at [https://github.com/argoproj/argocd-example-apps/tree/master/apps](https://github.com/argoproj/argocd-example-apps/tree/master/apps). It is using [Helm]({{site.baseurl}}/docs/yaml-examples/examples/helm/), but you can use any other Kubernetes templating mechanism such as [Kustomize]({{site.baseurl}}/docs/yaml-examples/examples/deploy-with-kustomize/) (or even plain manifests). + +Once you deploy the application with Codefresh, you will see the parent app in the dashboard with a small arrow: + +{% include image.html + lightbox="true" + file="/images/guides/gitops/app-of-apps-closed.png" + url="/images/guides/gitops/app-of-apps-closed.png" + alt="App of Apps" + caption="App of Apps" + max-width="90%" + %} + +You can expand the application by clicking on the arrow to inspect its child applications. + +{% include image.html + lightbox="true" + file="/images/guides/gitops/app-of-apps.png" + url="/images/guides/gitops/app-of-apps.png" + alt="App of Apps expanded" + caption="App of Apps expanded" + max-width="90%" + %} + + Then you can either click on the parent application or any of the children to visit the respective dashboard. 
In the dashboard of the parent application, you will also be notified for its components after each deployment under the "Updated Applications" header: + + {% include image.html + lightbox="true" + file="/images/guides/gitops/updated-apps.png" + url="/images/guides/gitops/updated-apps.png" + alt="Children applications" + caption="Children applications" + max-width="90%" + %} + + Note that the app of apps pattern is best used for related but not interdependent applications. If you have applications that depend on each other (e.g. frontend that needs backend and backend that needs a DB) we suggest you use the standard [Helm dependency mechanism](https://helm.sh/docs/helm/helm_dependency/). + +## Integrating Codefresh and Jira + +> Note that Codefresh currently has to provide you with access to use the Jira Marketplace App. Please get in touch for more information. + +Setting up the Codefresh Jira integration provides + +* Higher observability of deployments within your GitOps Dashboard +* Higher observability of deployments within your Jira Account + +[Our integration section]({{site.baseurl}}/docs/integrations/jira) provides further details on ways to set-up the connection. + +Once set-up, you will be able to view information from Jira in the Codefresh GitOps Dashboard. Additionally, Jira will display + +* The build status across environments +* The deployment history +* Tickets and how they correlate to deployments + +The following screenshots show examples of the provided information. Here is the deployments details for a ticket in JIRA: + +{% include image.html +lightbox="true" +file="/images/integrations/jira/jira-integration-one.png" +url="/images/integrations/jira/jira-integration-one.png" +alt="Ticket deployment history" +caption="Ticket deployment history" +max-width="90%" +%} + +And here is a complete timeline of your deployments and the feature they contain. + +{% include image.html +lightbox="true" +file="/images/integrations/jira/jira-integration-two.png" +url="/images/integrations/jira/jira-integration-two.png" +alt="Jira Deployment timeline" +caption="Jira Deployment timeline" +max-width="90%" +%} + +For more information see the [Atlassian Codefresh page](https://www.atlassian.com/solutions/devops/integrations/codefresh) and the [integration documentation]({{site.baseurl}}/docs/integrations/jira/). + +## Using a Git repository for the pipelines + +Remember that according to GitOps we should place *all* application resources on Git. This means that the pipelines themselves must also be present in a Git repository and any change on them should pass from source control. + +Even though Codefresh has a [powerful inline editor]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#using-the-inline-pipeline-editor) for editing pipelines, as soon as you finish with your pipelines you [should commit them in Git](https://github.com/codefresh-contrib/gitops-pipelines) +and load them from the repository. + +{% include image.html + lightbox="true" + file="/images/guides/gitops/pipeline-from-git.png" + url="/images/guides/gitops/pipeline-from-git.png" + alt="Loading a pipeline from GIT" + caption="Loading a pipeline from GIT" + max-width="80%" + %} + + Once the pipeline is in Git, you should switch the online editor to [load the pipeline from the repository]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#loading-codefreshyml-from-version-control) instead of the inline text. 
+ +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[ArgoCD integration]({{site.baseurl}}/docs/integrations/argocd/) +[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +[Helm promotions]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) diff --git a/_docs/ci-cd-guides/helm-best-practices.md b/_docs/ci-cd-guides/helm-best-practices.md new file mode 100644 index 000000000..f85de4fef --- /dev/null +++ b/_docs/ci-cd-guides/helm-best-practices.md @@ -0,0 +1,366 @@ +--- +title: "Helm best practices" +description: "High-level overview of Helm workflows" +group: ci-cd-guides +redirect_from: + - /docs/new-helm/best-practices/ +toc: true +--- + +[Helm](https://helm.sh){:target="\_blank"} is a package manager for Kubernetes (think `apt` or `yum`). It works by combining several manifests into a single package called [a chart](https://helm.sh/docs/developing_charts/){:target="\_blank"}. +Helm also supports storing charts in remote or local Helm repositories that function as package registries, such as Maven Central, Ruby Gems, NPM registry, etc. + +Helm is currently the only solution that supports: + +* Grouping related Kubernetes manifests in a single entity (the chart) +* Basic templating and values for Kubernetes manifests +* Dependency declaration between applications (chart of charts) +* A registry of available applications to be deployed (Helm repository) +* A view of a Kubernetes cluster at the application/chart level +* Managing of chart installation/upgrades as a whole +* Built-in rollback of a chart to a previous version without running a CI/CD pipeline again + +You can find a list of public curated charts in the default [Helm repository](https://github.com/helm/charts/tree/master/stable){:target="\_blank"}. + +Several third-party tools support Helm chart creation such as [Draft](https://draft.sh/){:target="\_blank"}. Local Helm development +is also supported by [garden.io](https://docs.garden.io/using-garden/using-helm-charts){:target="\_blank"}, and/or [skaffold](https://skaffold.dev/docs/how-tos/deployers/#deploying-with-helm){:target="\_blank"}. Check your favorite tool for native Helm support. + +Codefresh also has built-in support for Helm: +* [Packages]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) +* [Deployments]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +* [Repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +* [Environments]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) + +## Helm concepts + +The [official docs](https://helm.sh/docs/using_helm/){:target="\_blank"} do a good job of explaining the basic concepts. +The table below focuses on some important points. + +Helm Concept|Description| Important point +---|--- | --- +Chart (unpackaged) | A folder with files that follow the Helm chart guidelines. | Can be deployed directly to a cluster | +Chart (packaged) | A `tar.gz` archive of the above. | Can be deployed directly to a cluster | +Chart name | Name of the package as defined in `Chart.yaml` | Part of package identification | +Templates | A set of Kubernetes manifests that form an application. | `Go` templates can be used | +Values | Settings that can be parameterized in Kubernetes manifests. | Used for templating of manifests | +Chart version | The version of the package/chart. 
| Part of package identification | +App version | The version of the application contained in the chart. | **Independent from chart version** | +Release | A deployed package in a Kubernetes cluster. | **Multiple releases of the same chart can be active**| +Release name | An arbitrary name given to the release. | **Independent from name of chart** | +Release Revision | A number that gets incremented each time an application is deployed/upgraded.| **Unrelated to chart version**| +Repository | A file structure (HTTP server) with packages and an `index.yaml` file. | Helm charts can be deployed **without** being first fetched from a repository | +Installing | Creating a brand-new release from a Helm chart (either unpackaged, packaged or from a repo). | | +Upgrading | Changing an existing release in a cluster | Can be upgraded to any version (even the same) | +Rolling back | Going back to a previous revision of a release. | Helm handles the rollback, no need to rerun pipeline | +Pushing | Storing a Helm package on a repository. | Chart will be automatically packaged | +Fetching | Downloading a Helm package from a repository to the local filesystem. | | + +## Common Helm misconceptions + +Any new technology requires training on how to use it effectively. If you have already worked with any type of package manager, you should be familiar with how Helm works. + +Here is a list of important Helm points that are often controversial between teams. + +### Helm repositories are optional + +Using Helm repositories is a recommended practice, but completely optional. You can deploy a Helm chart to a Kubernetes cluster directly from the filesystem. The [quick start guide]({{site.baseurl}}/docs/quick-start/deploy-with-helm/) describes this scenario. + +Helm can install a chart either in the package (`.tgz`) or unpackaged (tree of files) to a Kubernetes cluster right away. Thus, the most minimal Helm pipeline has only two steps: + +1. Check out from Git a Helm chart described in uncompressed files. +1. Install this chart to a Kubernetes cluster. + +{% include image.html +lightbox="true" +file="/images/guides/helm-best-practices/helm-direct-deployment.png" +url="/images/guides/helm-best-practices/helm-direct-deployment.png" +alt="Simplest Helm pipeline" +caption="Simplest Helm pipeline" +max-width="70%" +%} + +You will see in the next section more efficient workflows, but the fact remains that Helm repositories are optional. There is **no** technical requirement that a Helm chart must be uploaded to a Helm repository before being deployed to a cluster. + +### Chart versions and appVersions + +Each Helm chart has the ability to define two separate versions: + +1. The version of the chart itself (`version` field in `Chart.yaml`). +1. The version of the application contained in the chart (`appVersion` field in `Chart.yaml`). + +These are unrelated and can be bumped up in any manner that you see fit. You can sync them together or have them increase independently. There is no right or wrong practice here as long as you stick into one. We will see some versioning strategies in the next section. + +### Charts and sub-charts + +The most basic way to use Helm is by having a single chart that holds a single application. The single chart will contain all the resources needed by your application such as deployments, services, config-maps etc. + +However, you can also create a chart with dependencies to other charts (a.k.a. umbrella chart), which are completely external using the `requirements.yaml` file. 
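+For reference, a minimal `requirements.yaml` for a hypothetical umbrella chart could look like the sketch below. The chart names, versions, and repository URL are placeholders, and in Helm 3 the same list can also live under the `dependencies` field of `Chart.yaml`:
+
+`requirements.yaml`
+{% highlight yaml %}
+{% raw %}
+dependencies:
+  # each entry pulls in an existing chart as a sub-chart of the umbrella chart
+  - name: frontend
+    version: 1.2.0
+    repository: https://charts.example.com/stable
+  - name: backend
+    version: 2.0.1
+    repository: https://charts.example.com/stable
+{% endraw %}
+{% endhighlight %}
+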
Using this strategy is optional and can work well in several organizations. Again, there is no definitive answer on right and wrong here, it depends on your team process. + +{% include image.html +lightbox="true" +file="/images/guides/helm-best-practices/chart-structure.png" +url="/images/guides/helm-best-practices/chart-structure.png" +alt="Possible Helm structures" +caption="Possible Helm structures" +max-width="70%" +%} + +We will see some scenarios in the next sections on why you would want to use umbrella charts. + + +### Helm vs K8s templates + +Helm is a package manager that also happens to include templating capabilities. Unfortunately, a lot of people focus only on the usage of Helm as a template manager and nothing else. + +Technically Helm can be used as only a templating engine by stopping the deployment process in the manifest level. It is perfectly possible to use Helm only to [create plain Kubernetes manifests](https://helm.sh/docs/helm/#helm-template){:target="\_blank"} and then install them on the cluster using the standard methods (such as `kubectl`). But then you miss all the advantages of Helm (especially the application registry aspect). + +At the time of writing Helm is the only package manager for Kubernetes, so if you want a way to group your manifests and a registry of your running applications, there are no off-the-shelf alternative apart from Helm. + +Here is a table that highlights the comparison: + +Helm Feature|Alternative| +---|--- +Templating | Kustomize, k8comp, kdeploy, ktmpl, kuku, jinja, sed, awk, etc. +Manifest grouping (entity/package) | None +Application/package dependencies | None +Runtime view of cluster packages | None +Registry of applications | None +Direct rollbacks and Upgrades | None + + + + +## Helm pipelines + +With the basics out of the way, we can now see some typical Helm usage patterns. Depending on the size of your company and your level of involvement with Helm you need to decide which practice is best for you. + + +### Deploy from an unpackaged chart + +This is the simplest pipeline for Helm. The Helm chart is in the same Git repository as the source code of the application. + +{% include image.html +lightbox="true" +file="/images/guides/helm-best-practices/helm-no-repo.png" +url="/images/guides/helm-best-practices/helm-no-repo.png" +alt="Using Helm without a Helm repository" +caption="Using Helm without a Helm repository" +max-width="70%" +%} + +The steps are the following: + +1. Code/Dockerfile/Chart is checked out from Git +1. Docker image is built (and pushed to [default Docker registry]({{site.baseurl}}/docs/integration/docker-registries/#the-default-registry)) +1. Chart is [deployed directly]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#example-installing-a-chart) to a Kubernetes Cluster + +Notice that in this pipeline there is no Helm repository involved. + +> We recommend this workflow only while you are learning Helm. Storing your Helm charts in a Helm repository is a better practice as described in the next section. + +### Package/push and then deploy + +This is the recommended approach when using Helm. First, you package and push the Helm chart into a repository, and then you deploy it to your cluster. +This way your Helm repository shows a registry of the applications that run on your cluster. You can also reuse the charts to deploy to other environments (described later in this page). 
+
+{% include image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/basic-helm-pipeline.png"
+url="/images/guides/helm-best-practices/basic-helm-pipeline.png"
+alt="Basic Helm application pipeline"
+caption="Basic Helm application pipeline"
+max-width="70%"
+%}
+
+The Helm chart can be either in the same Git repository as the source code (as shown above) or in a different one.
+Note that this workflow assumes that you [have attached a Helm repository]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#step-4-optional---import-the-helm-configuration-in-your-pipeline-definition) configuration to the pipeline.
+
+If you use the [Codefresh Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) you can see all your releases in the Codefresh UI.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/helm-catalog.png"
+url="/images/guides/helm-best-practices/helm-catalog.png"
+alt="Helm application catalog"
+caption="Helm application catalog"
+max-width="70%"
+%}
+
+This approach also allows you to reuse Helm charts. After you publish a Helm chart in the Helm repository, you can deploy it to another environment (with a pipeline or manually) using different values.
+
+### Separate Helm pipelines
+
+Even though packaging and deploying a release in a single pipeline is the recommended approach, several companies have two different processes for packaging and releasing.
+
+In this case, you can create two pipelines: one that packages the Helm chart and uploads it to a Helm repository, and another one that deploys the Helm chart to a cluster.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/push-and-deploy.png"
+url="/images/guides/helm-best-practices/push-and-deploy.png"
+alt="Push and deploy in different pipelines"
+caption="Push and deploy in different pipelines"
+max-width="70%"
+%}
+
+While this approach offers flexible releases (as one can choose exactly what is released and what is not), it also raises the complexity of deployments. You need to pass parameters to the deployment pipeline to decide which chart version will be deployed.
+
+In Codefresh, you can also have the two pipelines automatically [linked together]({{site.baseurl}}/docs/integrations/codefresh-api/#using-codefresh-from-within-codefresh).
+
+### Using Helm rollbacks
+
+Helm has the native capability of [rolling back](https://helm.sh/docs/helm/#helm-rollback){:target="\_blank"} a *release* to any previous *revision*. This can be done manually or via the [Codefresh UI]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/#helm-releases-overview).
+
+A more advanced usage would be to automatically roll back a release if it "fails".
+
+{% include image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/helm-rollback.png"
+url="/images/guides/helm-best-practices/helm-rollback.png"
+alt="Automatic Helm rollback"
+caption="Automatic Helm rollback"
+max-width="70%"
+%}
+
+In the example pipeline above, after deployment, we run some smoke tests/health checks. If they fail,
+then the rollback step is executed using [pipeline conditionals]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/).
+
+Alternatively, you can run any other [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) after a deployment, such as health checks, metric collection, or load testing, that decides whether a Helm rollback is needed or not.
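+
+As a rough sketch of this idea, the two steps below could follow the deployment step of a pipeline. The release name, namespace, helm image, and health check URL are placeholders (not part of an actual example repository), and the step that authenticates against the target cluster is omitted for brevity:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+  run_smoke_tests:
+    title: Smoke tests
+    type: freestyle
+    image: 'byrnedo/alpine-curl'
+    fail_fast: false                    # do not stop the pipeline, so the rollback step can react
+    commands:
+      - "curl --fail http://example-prod:8080/health"
+  rollback_release:
+    title: Rollback Helm release
+    type: freestyle
+    image: 'alpine/helm:3.9.0'          # placeholder: any image that bundles the helm binary
+    commands:
+      # without an explicit revision, helm rolls back to the previous release revision
+      - "helm rollback example-prod --namespace production"
+    when:
+      condition:
+        all:
+          smokeTestsFailed: steps.run_smoke_tests.result == 'failure'
+{% endraw %}
+{% endhighlight %}
+
+Because the smoke-test step uses `fail_fast: false`, a failed health check does not abort the build, which is what allows the conditional rollback step to run at all.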
+ +Integrating automatic Helm rollbacks can be used in all kinds of Helm workflows that were described in this section. + + +## Helm packaging strategies + +As mentioned before a Helm chart version is completely different than the application version it contains. This means that you can track versions on the Helm chart itself separately from the applications it defines. + +### Simple 1-1 versioning + +This is the most basic versioning approach, and it is the suggested one if you are starting out with Helm. +Don't use the `appVersion` field at all (it is optional anyway) and just keep the chart version in sync with your actual application. + +{% include image.html +lightbox="true" +file="/images/guides/helm-best-practices/chart-version-single.png" +url="/images/guides/helm-best-practices/chart-version-single.png" +alt="Synced versions in Helm" +caption="Synced versions in Helm" +max-width="60%" +%} + +This approach makes version bumping very easy (you bump everything up) and also allows you to quickly track +what application version is deployed on your cluster (same as chart version). + +The downside of this approach is that you can't track chart changes separately. + +### Chart versus application versioning + +This is an advanced approach which you should adopt if changes are happening in the charts themselves all the time (i.e. in the templates) and you want to track them separately from the application. + +{% include image.html +lightbox="true" +file="/images/guides/helm-best-practices/chart-version-multiple.png" +url="/images/guides/helm-best-practices/chart-version-multiple.png" +alt="Independent Helm versioning" +caption="Independent Helm versioning" +max-width="90%" +%} + +An important point here is that you need to adopt a policy in your team on what a "chart change" means. Helm does not enforce chart version changes. You can deploy a different chart with the same version as the previous one. So, if this is something that you want to do, you need to make sure that all teams are on the same page for versioning practices. + +On the plus side, this workflow allows you to individually version charts and applications and is very flexible for companies with teams that manage separately the charts from the application source code. + + +### Umbrella charts + +Umbrella charts are charts of charts. They add an extra layer of complexity on both previous approaches. +You can follow the same paradigms in umbrella charts. Either the parent chart has the same version as everything else (first approach) or it has a version on its own. + +In the second case, you need to agree with your team on when exactly the parent chart version should be bumped. Is it only when a child chart changes? Only when an application changes? or both? + +The answer does not really matter as long as your team follows the same rules. + +## Helm promotion strategies + +A Helm chart (like a Docker image) should be promoted between environments. It should start with testing and staging environments and gradually move to production ones. + +### Single repository with multiple environments + +This is the most basic deployment workflow. You have a single Helm chart (which is exactly the same across all environments). +It is deployed to multiple targets using a different set of values. 
+
+{% include image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/multiple-environments.png"
+url="/images/guides/helm-best-practices/multiple-environments.png"
+alt="Deploy to multiple environments with Helm"
+caption="Deploy to multiple environments with Helm"
+max-width="90%"
+%}
+
+Codefresh has several ways to override the values for each environment within a [pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#helm-values).
+
+### Chart promotion between environments
+
+This is the recommended deployment workflow. Codefresh can store different Helm values per environment in the [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/#using-shared-helm-values) mechanism.
+Then you can view and manage releases from the [Helm environments dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/).
+
+{% include
+image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/board.png"
+url="/images/guides/helm-best-practices/board.png"
+alt="Helm Environment Dashboard"
+caption="Helm Environment Dashboard"
+max-width="80%"
+%}
+
+Then, once you promote a Helm release, either from the GUI or from a pipeline, you can select exactly which set of configuration parameters you want to use:
+
+{% include
+image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/value-options.png"
+url="/images/guides/helm-best-practices/value-options.png"
+alt="Changing deployment values"
+caption="Changing deployment values"
+max-width="40%"
+%}
+
+This workflow has two big advantages:
+
+1. You get a visual overview of which Helm release is installed in which environment
+1. You can promote releases without running the initial CI/CD pipeline (that created the chart)
+
+### Chart promotion between repositories and environments
+
+A more advanced workflow (useful in organizations with multi-location deployments) is the promotion of Helm releases between both [repositories]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) and environments.
+
+{% include
+image.html
+lightbox="true"
+file="/images/guides/helm-best-practices/advanced-promote.png"
+url="/images/guides/helm-best-practices/advanced-promote.png"
+alt="Advanced Helm promotion"
+caption="Advanced Helm promotion"
+max-width="90%"
+%}
+
+There are different pipelines for:
+
+1. Creating the Helm chart and storing it in a staging Helm repository (e.g. the Codefresh Helm repository)
+1. Deployment of the Helm chart to a staging environment. After it is tested, *the chart* is promoted to one or more "production" Helm repositories
+1. Deployment of the promoted Helm chart to one of the production environments
+
+While this workflow is very flexible, it adds complexity regarding the number of Helm charts available (since they exist in multiple Helm repositories). You also need to set up the parameters between the different pipelines so that the Helm charts to be deployed can indeed be found in the expected Helm repository.
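+
+As a rough sketch of the chart promotion itself (the tail end of the second pipeline above), copying a tested chart from a staging repository to a production repository could be a single freestyle step. The registry URLs, chart name, and version below are placeholders; the example assumes Helm 3.8+ with OCI-based registries and omits registry authentication. Classic HTTP chart repositories would need the respective push plugin instead:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+  promote_chart:
+    title: Promote chart to production repository
+    type: freestyle
+    image: 'alpine/helm:3.9.0'          # placeholder: any image that bundles the helm binary
+    commands:
+      # download the tested chart version from the staging repository
+      - helm pull oci://registry.example.com/staging-charts/sample-app --version 1.0.3
+      # upload the exact same package to the production repository
+      - helm push sample-app-1.0.3.tgz oci://registry.example.com/prod-charts
+{% endraw %}
+{% endhighlight %}
+
+The important design point is that the package is copied as-is: the chart is not rebuilt for production, so what was tested in staging is exactly what gets deployed later.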
+ +## Related articles +[Helm quick start guide]({{site.baseurl}}/docs/quick-start/deploy-with-helm/) +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Helm Dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) +[Helm Promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) diff --git a/_docs/ci-cd-guides/kubernetes-templating.md b/_docs/ci-cd-guides/kubernetes-templating.md new file mode 100644 index 000000000..02eda51b9 --- /dev/null +++ b/_docs/ci-cd-guides/kubernetes-templating.md @@ -0,0 +1,214 @@ +--- +title: "Simple Kubernetes templating" +description: "Use templates in your Kubernetes manifests" +group: ci-cd-guides +toc: true +--- + +Once you start working with Kubernetes you will see the need for using templates in Kubernetes manifests for common parameters such as: + +* The docker image name of a deployment +* The docker image tag of a deployment +* Number of replicas +* Service labels +* Configmaps and other settings + +Kubernetes does not provide any templating mechanism on its own. Deployed manifests are expected to be static yaml files. An external solution is needed if you want to pass parameters in your manifests. + +The proper way to handle templates is within [Helm]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) . Helm is the package manager for Kubernetes and also includes templating capabilities. + +To use templates without using Helm, there are several templating solutions available including [Kustomize](https://github.com/kubernetes-sigs/kustomize){:target="\_blank"} from Google. + +Codefresh also includes its own simple templating mechanism that has built-in integration with all [pipeline variables]({{site.baseurl}}/docs/pipelines/variables/) as we will explain in this page. + +## Using the Codefresh deploy image + +Codefresh offers a public docker image at [https://hub.docker.com/r/codefresh/cf-deploy-kubernetes/tags/](https://hub.docker.com/r/codefresh/cf-deploy-kubernetes/tags/){:target="\_blank"} for easy templating of Kubernetes manifests. The source code of the image is at [https://github.com/codefresh-io/cf-deploy-kubernetes](https://github.com/codefresh-io/cf-deploy-kubernetes){:target="\_blank"}. This image can be used in a freestyle step like this: + +`YAML` +{% highlight yaml %} +{% raw %} + MyDeploy: + title: K8s Deploy + image: codefresh/cf-deploy-kubernetes:master + commands: + - /cf-deploy-kubernetes deployment.yml + environment: + - KUBECONTEXT=my-cluster-name + - KUBERNETES_NAMESPACE=my-namespace +{% endraw %} +{% endhighlight %} + +The step accepts the following environment variables: + +* `KUBECONTEXT`: Corresponds to the name of a cluster added to codefresh. +* `KUBERNETES_NAMESPACE`: The namespace to which to deploy. +* `KUBECTL_ACTION`: An action for `kubectl `. Valid values are `apply|create|replace` (default is `apply`). +* `KUBERNETES_DEPLOYMENT_TIMEOUT`: The duration to wait for a successful deployment before failing the build (defaults to 120 secs). + +The step will deploy your deployment to the cluster specified by the context and namespace given. The name of the context is the name of your cluster as seen in the [Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services). 
+ +Before the deployment takes place, all Codefresh variables found in the `deployment.yml` file in the form of {% raw %}`{{MY_VARIABLE}}`{% endraw %} will be automatically replaced with their current values. + +Here is an example manifest: + +`Kubernetes manifest` +{% highlight yaml %} +{% raw %} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: my-demo-app + annotations: + branch: {{CF_BRANCH_TAG_NORMALIZED}} + source-repository: {{CF_REPO_NAME}} +spec: + replicas: 4 + template: + metadata: + labels: + name: my-demo-app + app: my-demo-app + spec: + containers: + - name: my-demo-app + image: r.cfcr.io/{{CF_ACCOUNT}}/my-sample-application:{{CF_SHORT_REVISION}} + imagePullPolicy: Always + ports: + - name: http + containerPort: 8080 + protocol: TCP +{% endraw %} +{% endhighlight %} + +In this case the image will get the replacement for your Codefresh account name and the tag will use the git revision. Metadata annotations are also defined with value from the branch name and the git repository name. + +Notice that the variables are declared as {% raw %}`{{MY_VARIABLE}}`{% endraw %} form and **NOT** {% raw %}`${{MY_VARIABLE}}`{% endraw %} which is how they are used inside the [Codefresh yaml]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) definition. + + +## Creating custom manifest replacements + +Apart from the built-in [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/) you can also create any variable on your own using the same replacement syntax. + +Here is an example manifest. + +`Kubernetes manifest` +{% highlight yaml %} +{% raw %} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: my-demo-app + annotations: + source-repository: {{CF_REPO_NAME}} + branch: {{CF_BRANCH_TAG_NORMALIZED}} + custom-label: {{MY_CUSTOM_LABEL}} +spec: + replicas: {{MY_REPLICA_NUMBER}} + template: + metadata: + labels: + name: my-demo-app + app: my-demo-app + spec: + containers: + - name: my-demo-app + image: r.cfcr.io/{{CF_ACCOUNT}}/my-sample-application:{{CF_SHORT_REVISION}} + imagePullPolicy: Always + ports: + - name: http + containerPort: 8080 + protocol: TCP + imagePullSecrets: + - name: {{PULL_SECRET}} +{% endraw %} +{% endhighlight %} + +Here you can see custom variables for an annotation, the replica number and the pull secret (in addition with the standard variables). +You can provide the values for your custom variables as environment parameters in the freestyle step. + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: my-sample-application + tag: '${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile + MyDeploy: + title: K8s Deploy + image: codefresh/cf-deploy-kubernetes:master + commands: + - /cf-deploy-kubernetes deployment.yml + environment: + - KUBECONTEXT=k8s-demo@Google + - KUBERNETES_NAMESPACE=my-namespace + - MY_CUSTOM_LABEL=build-id-${{CF_BUILD_ID}} + - MY_REPLICA_NUMBER=3 + - PULL_SECRET=codefresh-generated-r.cfcr.io-cfcr-my-namespace +{% endraw %} +{% endhighlight %} + +In the environment section you can see the values for the custom variables. We set the replica number to 3, a full string for the pull secret and a concatenated string for the annotation. + +## Using replacements in multiple manifests + +By default, the deploy step will only do replacements in a single manifest. 
If you have multiple Kubernetes manifests you can merge all of them in a single file, or use multiple times the deploy commands like this: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} + MyDeploy: + title: K8s Deploy + image: codefresh/cf-deploy-kubernetes:master + commands: + - /cf-deploy-kubernetes deployment.yml + - /cf-deploy-kubernetes service.yml + - /cf-deploy-kubernetes config-map.yml + environment: + - KUBECONTEXT=my-cluster-name + - KUBERNETES_NAMESPACE=my-namespace + - MY_REPLICA_NUMBER=3 + - KUBERNETES_DEPLOYMENT_TIMEOUT=360 +{% endraw %} +{% endhighlight %} + +Variable replacements will happen in all manifests before they are deployed. + + +## Using Unix command line tools for templates + +It is also perfectly possible to use any Unix templating or text editing tool such as `sed` or `awk` to perform text replacements in Kubernetes manifests. + +As a very simple example you could a replacement with the following [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) in your Codefresh pipeline. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_replacement: + image: alpine + commands: + # replace every ${TAG} with current TAG variable value + - sed -i 's/${TAG}/${{TAG}}/g' my-k8s-deployment.yaml +{% endraw %} +{% endhighlight %} + +## Related articles +[Connnecting to your cluster]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/) +[Managing your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Accessing a docker registry]({{site.baseurl}}/docs/deployments/access-docker-registry-from-kubernetes/) +[Running custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/) + + + + + + + + + \ No newline at end of file diff --git a/_docs/ci-cd-guides/microservices.md b/_docs/ci-cd-guides/microservices.md new file mode 100644 index 000000000..03b243f76 --- /dev/null +++ b/_docs/ci-cd-guides/microservices.md @@ -0,0 +1,237 @@ +--- +title: "Building microservices" +description: "Create pipelines specifically for microservice applications" +group: ci-cd-guides +toc: true +--- + +Now that you know how to [build your app]({{site.baseurl}}/docs/ci-cd-guides/packaging-compilation/) and [create Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/), we can see how Codefresh works with microservice applications. + +## Organizing pipelines for monolithic applications + +In the past, pipelines for monolithic applications tended to share the same characteristics of the application they were building. Each project had a single pipeline which was fairly complex, and different projects had completely different pipelines. Each pipeline was almost always connected to a single Git repository. + +{% include image.html +lightbox="true" +file="/images/guides/microservices/monolithic-pipelines.png" +url="/images/guides/microservices/monolithic-pipelines.png" +alt="Monolithic pipelines" +caption="Monolithic pipelines" +max-width="80%" +%} + +The complexity of each pipeline was detrimental to easy maintenance. Pipelines were typically controlled by a small team of gurus, familiar with both the internals of the application as well as the deployment environment. + +For each software project, operators handle the pipeline structure, while developers only work with the source code (going against the DevOps paradigm where all teams should share responsibility for common infrastructure and collaborate on shared problems). 
+
+Pipeline size and complexity was often a huge pain point. Even though several tools existed for the continuous integration part of a monolithic application, continuous deployment was a completely different matter, which forced a lot of companies to create their own custom in-house scripts to take care of deployment.
+
+## Scalability issues with microservice pipelines
+
+Microservices of course have several advantages regarding deployment and development, but they also come with their own challenges. Management of microservice repositories and pipelines becomes much harder as the number of applications grows.
+
+While a company might have to deal with 1–5 pipelines in the case of monolithic applications (assuming 1–5 projects), the number quickly jumps to 25 if each monolith is divided into 5 microservices.
+
+These numbers differ per organization. It is perfectly normal for an application to have 10 microservices. So at a big organization that has 50 applications, the operator team is suddenly tasked with the management of 500+ pipelines.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/microservices/moving-to-microservices.png"
+url="/images/guides/microservices/moving-to-microservices.png"
+alt="Number of pipelines is exploding"
+caption="Number of pipelines is exploding"
+max-width="80%"
+%}
+
+This sudden explosion in numbers makes it impractical to manage pipelines manually. Several CI solutions do not have the capacity to work with such a high number of pipelines.
+
+**Here is where we reach the biggest pitfall regarding pipeline management in the era of microservices**. Several companies tried to solve the problem of microservice pipelines using shared pipeline segments.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/microservices/shared-pipelines.png"
+url="/images/guides/microservices/shared-pipelines.png"
+alt="Shared libraries add extra complexity"
+caption="Shared libraries add extra complexity"
+max-width="80%"
+%}
+
+In theory, this sounds like a good idea:
+
+1. Operators locate the parts of pipelines that are common across applications
+1. A shared pipeline segment registry is created to hold all those common parts
+1. Pipelines in existing projects are re-engineered to depend on the common segments
+1. New projects must first examine the library of common pipeline segments and reuse what is already there
+
+The final result is that a single pipeline is actually composed of two types of steps: those common to other pipelines, and those that are specific to that project only.
+
+This has led to the development of several solutions which attempt to centralize common pipeline parts and re-use them in the form of “libraries” within software projects. The issue here is that this approach requires a very large time investment, as well as a disciplined team that can communicate and cooperate on the following factors:
+
+1. Detecting which pipeline segments are indeed common
+1. Keeping the library of common pipeline segments up-to-date
+1. Disallowing copy-pasting of pipelines
+1. Development of brand new pipelines when needed
+1. Initial setup and pipeline bootstrap for each new project created
+
+Unfortunately, in practice, as the number of microservice applications grows, teams find it very hard to keep all these principles in mind when creating new projects.
+
+## Reusing pipelines for microservice applications
+
+Codefresh is the first CI/CD solution for microservices and containers.
Because we are not burdened with any legacy decisions, we are free to define a new model for Codefresh pipelines which is focused on microservices. + +The basic idea is that all microservices of a single application have almost always the same lifecycle. They are compiled, packaged, and deployed in a similar manner. Once this realization is in place, we can see that instead of having multiple pipelines for each microservice, where each one is tied to a Git repository, we have instead a single pipeline shared by all microservices. + +{% include image.html +lightbox="true" +file="/images/guides/microservices/microservice-pipelines.png" +url="/images/guides/microservices/microservice-pipelines.png" +alt="Keeping a single pipeline for all microservices" +caption="Keeping a single pipeline for all microservices" +max-width="80%" +%} + +The impact of this design cannot be understated. First of all, it should be clear that there is no need for sharing pipeline segments anymore. The whole pipeline is essentially the re-usable unit. + +This makes pipeline construction very simple. + +The biggest advantage, however, is the way new projects are created. When a new microservice is added in an application, the pipeline is already there and only a new trigger is added for that microservice. Notice that the pipeline is not connected to any specific Git repository anymore. All information for a repository is coming from the git trigger that started this pipeline. + +As an operator you can bootstrap a new project by quickly adding a new trigger on an existing pipeline: + +{% include image.html +lightbox="true" +file="/images/guides/microservices/single-pipeline.png" +url="/images/guides/microservices/single-pipeline.png" +alt="One pipeline with multiple microservices" +caption="One pipeline with multiple microservices" +max-width="80%" +%} + +This is the fastest way possible to bootstrap a new project. As the number of microservices is growing, the only thing that is growing is the list of triggers. All pipelines are exactly the same. + +## Creating reusable pipelines + +When working with microservices you need to remember that: + +1. In Codefresh a pipeline can stand on its own. It is **not** connected by default to any Git repository. +1. You can write Codefresh pipelines in a generic manner so that they can work with multiple applications. +1. If you connect multiple triggers to a single pipeline, all microservices will share that pipeline. +1. You can create multiple pipelines for each project if you have microservices with slightly different architecture. + +To create a reusable pipeline use the [generic form of the clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + compile: + title: "Create JAR" + type: "freestyle" + image: 'maven:3.5.2-jdk-8-alpine' + working_directory: "${{clone}}" + commands: + - 'mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package' +{% endraw %} +{% endhighlight %} + +This pipeline uses variables in the clone step. These variables are automatically populated by the [respective trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/). So you can connect this pipeline to any number of Java repositories and it will work on all of them (assuming they use Maven). 
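+
+The same pattern applies to non-Java microservices as well. As an illustration only (the Node.js image tag and the npm commands are assumptions about your projects), a generic Node.js variant of the pipeline could look like this:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  clone:
+    title: "Cloning repository"
+    type: "git-clone"
+    repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
+    revision: '${{CF_REVISION}}'
+  package:
+    title: "Install and test"
+    type: "freestyle"
+    image: 'node:11'
+    working_directory: "${{clone}}"
+    commands:
+      - 'npm install'
+      - 'npm test'
+{% endraw %}
+{% endhighlight %}
+
+Because the clone step relies only on trigger-provided variables, a single definition like this can serve every Node.js microservice in the application.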
+ +{% include image.html +lightbox="true" +file="/images/guides/microservices/multiple-triggers.png" +url="/images/guides/microservices/multiple-triggers.png" +alt="Connecting multiple triggers to a single pipeline" +caption="Connecting multiple triggers to a single pipeline" +max-width="100%" +%} + +Any time you run the pipeline you can select which trigger/branch you will use. So in the first case the values will be like this: + +* `CF_REPO_OWNER=kostis-codefresh` +* `CF_REPO_NAME=spring-petclinic` +* `CF_REVISION=another-branch` + +In the second case the values will be replaced like this: + +* `CF_REPO_OWNER=codefresh-contrib` +* `CF_REPO_NAME=spring-boot-2-sample-app` +* `CF_REVISION=master` + +You can follow the same pattern for any other kind of application (NodeJS, Python, Ruby etc.) + + +## Adding a new microservice to an existing application + +As an example, let's say that you have an application with five microservices. Two of them use Java and three use NodeJs. You can easily create two pipelines for the whole application, one for each programming language. + +However, if you take advantage of [multistage Docker builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/#production-ready-docker-images-with-multi-stage-builds), you could even have a single pipeline for all five services: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: '${{CF_REPO_NAME}}' + working_directory: ./ + tag: 'multi-stage' + dockerfile: Dockerfile + deploy_to_k8s: + title: Deploy to cluster + type: deploy + kind: kubernetes + cluster: 'production-gke' + namespace: default + service: '${{CF_REPO_NAME}}' + candidate: + image: '${{build_app_image}}' +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. Checks out source code from any connected trigger +1. Creates a Docker image (assumes a multistage Dockerfile) +1. Deploys the image to a Kubernetes cluster + + +Now, if you add another microservice to the application, you can simply add a new trigger making the addition as easy as possible: + +{% include image.html +lightbox="true" +file="/images/guides/microservices/add-new-microservice.png" +url="/images/guides/microservices/add-new-microservice.png" +alt="Connecting a new trigger for a new microservice" +caption="Connecting a new trigger for a new microservice" +max-width="80%" +%} + +This is just an example pipeline. You might have another generic pipeline for Helm deployments, FTP uploads, VM images and so on. + + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) + + + + + + + diff --git a/_docs/ci-cd-guides/packaging-compilation.md b/_docs/ci-cd-guides/packaging-compilation.md new file mode 100644 index 000000000..924cb13ce --- /dev/null +++ b/_docs/ci-cd-guides/packaging-compilation.md @@ -0,0 +1,257 @@ +--- +title: "Building your app" +description: "Compile and package traditional (non-Docker) artifacts" +group: ci-cd-guides +toc: true +--- + +When you use Codefresh for continuous integration (CI), one of the most basic tasks is compiling and packaging applications. 
Though Codefresh has native support for Docker artifacts, it still works great with traditional (non-Dockerized) applications that don't use a Dockerfile for the actual build.
+
+>If your application is deployed as a Docker image, see [building Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) instead.
+
+## Using supporting Docker images in CI/CD environment
+
+Unlike other CI solutions that you might be familiar with, Codefresh build nodes are very simple. They have only Docker installed and nothing else.
+
+When you run a Codefresh pipeline, you choose the Docker images to be used in the CI/CD environment. Once the pipeline runs, the Docker images are automatically launched by Codefresh, and you have access to all the tools the images contain. When the pipeline completes its run, all Docker images used for the pipeline are discarded, and the build machine reverts to its original state.
+
+Even if your application is not itself packaged as a Docker image, Codefresh pipelines are always "Docker-based" in the sense that Docker is used for the tools that take part in the pipeline.
+
+This approach has a lot of advantages:
+
+ * No maintenance effort for build nodes, as they only have Docker and nothing else.
+ * You can use any tool in your pipeline that you want without actually installing it first.
+ * All public Docker images in Docker Hub are potential pipeline steps.
+ * You can use different versions of the same tool in the same pipeline.
+ * It is very easy to upgrade a tool to a new version (just change the tag of the Docker image used).
+
+Notice also that unlike some other CI solutions:
+
+1. You can use multiple Docker images in the same pipeline, even if they contain the same tool, with no version conflicts.
+1. As Docker images in Codefresh pipelines have no special requirements, you can use *any* private or public Docker image.
+
+All [pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) in Codefresh are in fact Docker images.
+
+
+## Choosing programming tools as Docker images
+
+In practice, this means that if you have a Node application, you need to use a [Node image]({{site.baseurl}}/docs/example-catalog/ci-examples/nodejs) to package your application, a [Maven image]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/) if you are working with Java, a [Python]({{site.baseurl}}/docs/learn-by-example/python/) image for Python applications, and so on.
+
+You launch the image using the Codefresh freestyle step. Here is an example for Node:
+
+`codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+steps:
+  my_node_app:
+    title: Running unit tests
+    image: node:11
+    commands:
+      - npm install
+      - npm run test
+{% endhighlight %}
+
+This pipeline downloads the `node:11` image to the Codefresh build machine, launches it, and gives it access to your source code. It then runs the commands `npm install` and `npm run test`. The result is that your source code can be packaged without actually installing Node.js on the build machine beforehand.
+
+You can mix and match different images in the same pipeline. Let's say for example that you have a single repository that contains a front-end in Node.js and a back-end in Java.
You can easily create a pipeline that deals with both: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_node_app: + title: Packaging front end + image: node:11 + working_directory: ./front-end + commands: + - npm install + - npm run test + my_jar_compilation: + title: Packaging back end + image: maven:3.5.2-jdk-8-alpine + working_directory: ./back-end + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package +{% endhighlight %} + +This pipeline compiles the Java code under the `back-end` folder, and the Javascript Web application found in the `front-end` folder. Both Docker images have access to the same workspace via [the shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). + +To get up and running with Codefresh as quickly as possible, you can simply search DockerHub for an existing image that uses the tool you need. Top-level DockerHub images are curated by the Docker team and are considered safe. So most popular programming languages already have a Docker image that you can use in your pipeline. + +Of course, you can also [create your private Docker image or use any existing image]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/) from a private or public registry. In that case, you need to write the full name of the image used. +If you use an image from GCR (Google Container Registry), or another private registry, you would specify it as in the example below. + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_bazel_app: + title: Running a Bazel build + image: gcr.io/my-registry/bazel + commands: + - bazel build //:MyProject + my_e2e_tests: + title: Running Mocha Test + image: my-azure-registry.azurecr.io/kostis-codefresh/my-jasmine-runner:1.0.1 + commands: + - jasmine init +{% endhighlight %} + +In this pipeline, Docker images have a full prefix, so they are pulled by the respective registry instead of DockerHub. + +In this manner, you can run any tool in any Codefresh pipeline as long as it is offered in a Docker image. This means that Codefresh pipelines can work with any programming language and any tool that you can use on your workstation. + +Unlike other CI solutions, you don't need to wait for the Codefresh team to add "native support" for your favorite tool in a Codefresh pipeline. You can simply package it in a Docker image yourself and use it straight away. + + +## Using multiple Docker images in a single pipeline + +Unlike other CI solutions, there is no limit on the number of Docker images that you can use in a single pipeline. Also, all Docker images included in the same pipeline have access to the same project workspace via the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). +This means that you have maximum flexibility on what tools you use in a single project. + +As an example, let's see a pipeline that uses four different images for a single project. + + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + clone: + title: Cloning main repository... 
+ stage: prepare + type: git-clone + arguments: + repo: my-user/my-app + revision: master + git: github + package_my_code: + title: Compile application + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package + run_sonar: + title: Quality Analysis + image: sonarsource/sonar-scanner-cli + commands: + - sonar-scanner + environment: + - SONAR_HOST_URL=http://foo.acme:9000 + create_bucket: + title: Creating bucket in AWS + image: hashicorp/terraform:0.12.0 + commands: + - terraform init + - terraform apply -auto-approve + upload_file: + title: Uploading Jar file + image: mesosphere/aws-cli + commands: + - aws s3 sync ./target/app.jar s3://my-bucket/my-jar --delete +{% endhighlight %} + +This pipeline does the following: + +1. Checks out source code +1. Packages a Jar file (from the source code) +1. Runs Sonar analysis (taking into account both source code and compiled classes) +1. Creates a storage bucket in AWS (Amazon Web Services) +1. Uploads the JAR that was packaged in the bucket + +Notice how all Docker images use the same workspace without any extra configuration on your part. + +## Using different tool versions in the same pipeline + +The corollary to Docker-based pipelines is that you can use the same tool but with a different version in the **same** pipeline. +As an example, here is a pipeline that runs both Python 2.x and Python 3.x, and it just works. + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + get_deps: + title: Getting dependencies + image: python:3.6-slim + commands: + - pip install -r requirements.txt + run_my_tests: + title: Running Unit Test + image: python:2 + commands: + - pip install pytest + - pytest +{% endhighlight %} + +You can easily choose the specific version that matches each of your projects. + +
+ +Here is another example where two different applications use Node.js 11 and Node.js 9 in the same pipeline. + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +stages: + - packaging + - deploying +steps: + PackageMyNode1App: + title: Packaging Node application 1 + stage: packaging + image: node:11.1 + working_directory: ./brand-new-project + commands: + - echo "My Node version is" + - node --version + - npm install + PackageMyNode2App: + title: Packaging Node application 2 + stage: packaging + image: node:9.3.0-slim + working_directory: ./legacy-project + commands: + - echo "My Node version is" + - node --version + - npm install +{% endhighlight %} + +> These versions are per pipeline. So each team can use the versions they need for their projects without affecting the other teams. + +So one team in your company might use Terraform 0.10 in their pipelines: + + +{% highlight yaml %} + PlanWithTerraform: + image: hashicorp/terraform:0.10.0 + title: Deploying Terraform plan + stage: deploy + commands: + - terraform plan +{% endhighlight %} + +Another team can use Terraform 0.12 just by changing the YAML of their `codefresh.yml`: + +{% highlight yaml %} + DeployWithTerraform: + image: hashicorp/terraform:0.12.0 + title: Deploying Terraform plan + stage: deploy + commands: + - terraform apply -auto-approve +{% endhighlight %} + + +To summarize, you can easily use any version of any programming tool in a Codefresh pipeline without the fear of breaking +another unrelated pipeline. + + +## Related articles +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Creating Codefresh pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) + + + + + + diff --git a/_docs/ci-cd-guides/preview-environments.md b/_docs/ci-cd-guides/preview-environments.md new file mode 100644 index 000000000..15e6ee2d4 --- /dev/null +++ b/_docs/ci-cd-guides/preview-environments.md @@ -0,0 +1,347 @@ +--- +title: "Previewing dynamic environments" +description: "Deploy pull requests to cluster namespaces" +group: ci-cd-guides +toc: true +--- + + +In addition to deploying to [predefined environments]({{site.baseurl}}/docs/ci-cd-guides/environment-deployments/), for each pull request (PR), you may also need to deploy to dynamic environments, which are temporary, testing environments. For these types of environments, it is best to dynamically create an environment when a PR is created, and tear it down when the same PR is closed. + + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/dynamic-environments.png" +url="/images/guides/preview-environments/dynamic-environments.png" +alt="Dynamic Test environments" +caption="Dynamic Test environments" +max-width="90%" +%} + +Each developer works in isolation to test their features. This pattern contrasts with the traditional way of reusing static preexisting environments. + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/static-environments.png" +url="/images/guides/preview-environments/static-environments.png" +alt="Traditional static environments" +caption="Traditional static environments" +max-width="90%" +%} + +With Kubernetes you don't need to book and release specific test environments any more. Testing environments should +be handled in a transient way. 
+ +## Preview environments with Kubernetes + +There are many options to create temporary environments with Kubernetes. + +* Namespaces for each PR + The simplest option is to use different namespaces, one for each PR. So, a PR with name `fix-db-query` is deployed to a namespace called `fix-db-query`, and a PR with name `JIRA-1434`, is deployed to a namespace called `JIRA-1434` and so on. + +* Expose the environment URL + The second option is to expose the environment URL so that developers and testers can actually preview the application +deployment either manually or via automated tests. + The two major approaches here are with host-based and path-based URLs: + * For host-based URLs, the test environments are named `pr1.example.com`, `pr2.example.com` and so on + * For path-based URLs, the test environments are named `example.com/pr1`, `example.com/pr2` and so on + + Both approaches have advantages and disadvantages. Path-based URLs are easier to set up, but may not work with all applications, as they change the web context. Host-based URLs are more robust but need extra DNS configuration for the full effect. + + In Kubernetes clusters, you can set up types of URLs via [an Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/){:target="\_blank"}. + +## Example application + +You can find the application we will use at [https://github.com/codefresh-contrib/unlimited-test-environments-source-code](https://github.com/codefresh-contrib/unlimited-test-environments-source-code){:target="\_blank"}. +It is a standard Java/Spring boot application, that includes the following characteristics: + +* It has [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) that can be targeted at any host/port. We will use those tests as smoke test that will verify the preview environment after it is deployed +* It comes bundled in [a Helm chart](https://github.com/codefresh-contrib/unlimited-test-environments-manifests){:target="\_blank"} +* It has an ingress configuration ready for path-based URLs + +We are using [the Ambassador gateway](https://www.getambassador.io/){:target="\_blank"} as an ingress for this example, but you can use any Kubernetes-compliant ingress. + +Here is the [ingress manifest](https://github.com/codefresh-contrib/unlimited-test-environments-manifests/blob/main/simple-java-app/templates/ingress.yaml){:target="\_blank"}. + +{% highlight yaml %} +{% raw %} +kind: Ingress +apiVersion: extensions/v1beta1 +metadata: + name: "simple-java-app-ing" + annotations: + kubernetes.io/ingress.class: {{ .Values.ingress.class }} + +spec: + rules: + - http: + paths: + - path: {{ .Values.ingress.path }} + backend: + serviceName: simple-service + servicePort: 80 +{% endraw %} +{% endhighlight %} + +The path of the application is configurable and can be set at deploy time. + +## Creating preview environments for each PR + +Each time a PR is created, we want to perform the following tasks: + +1. Compile the application and run unit tests. +1. Run security scans, quality checks, and everything else we need to decide if the PR is valid. +1. Create a namespace with the same name as the PR branch. Deploy the PR and expose it as a URL that has the same name as the branch. 
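+
+The namespace and the URL path can both be derived from the branch name, which Codefresh exposes through built-in variables. Distilled to its essentials, the deployment part of the full pipeline shown below is roughly the following Helm step (a sketch only; the chart and release names come from the example application, and cluster details are omitted):
+
+{% highlight yaml %}
+{% raw %}
+  deploy_preview:
+    title: Deploying preview environment
+    type: helm
+    arguments:
+      action: install
+      chart_name: simple-java-app
+      release_name: my-spring-app
+      # Namespace named after the pull request branch
+      namespace: ${{CF_BRANCH_TAG_NORMALIZED}}
+      cmd_ps: '--create-namespace --wait'
+      custom_values:
+        # URL path named after the pull request branch
+        - 'ingress_path=/${{CF_BRANCH_TAG_NORMALIZED}}/'
+{% endraw %}
+{% endhighlight %}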
+ +Here is an example pipeline that does all these tasks: + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/pull-request-preview-pipeline.png" +url="/images/guides/preview-environments/pull-request-preview-pipeline.png" +alt="Pull Request preview pipeline" +caption="Pull Request preview pipeline" +max-width="100%" +%} + +This pipeline has the following steps: + +1. A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to fetch the source code of the application. +1. A [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that runs Maven for compilation and unit tests. +1. A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create the Docker image of the application. +1. A step that scans the source code for security issues with [Snyk](https://snyk.io/){:target="\_blank"}. +1. A step that scans the container image [for security issues]({{site.baseurl}}/docs/testing/security-scanning/) with [trivy](https://github.com/aquasecurity/trivy){:target="\_blank"}. +1. A step that runs [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) by launching the app in a [service container]({{site.baseurl}}/docs/pipelines/service-containers/). +1. A step for [Sonar analysis]({{site.baseurl}}/docs/testing/sonarqube-integration/). +1. A step that clones [a second Git repository](https://github.com/codefresh-contrib/unlimited-test-environments-manifests){:target="\_blank"} with the [Helm chart]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) of the application. +1. A step that deploys the source code to a new namespace. +1. A step that [adds a comment on the PR](https://codefresh.io/steps/step/kostis-codefresh%2Fgithub-pr-comment){:target="\_blank"} with the URL of the temporary environment. +1. A step that runs smoke tests against the temporary test environment. + +Note that the integration tests and security scans are just examples of what you can do before the PR is deployed. You can insert your own steps that check the content of a PR. 
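+
+For example, a hypothetical extra check could lint the Dockerfile of the pull request before anything gets deployed. The linter image below is only an illustration; swap in whatever validation matters for your project:
+
+{% highlight yaml %}
+{% raw %}
+  lint_dockerfile:
+    title: Linting the Dockerfile
+    stage: verify
+    image: hadolint/hadolint:latest-alpine
+    commands:
+      # Fails the build if the Dockerfile violates common best practices
+      - hadolint Dockerfile
+{% endraw %}
+{% endhighlight %}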
+ +Here is the complete YAML definition: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "prepare" + - "verify" + - "deploy" + +steps: + main_clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/unlimited-test-environments-source-code" + revision: "${{CF_REVISION}}" + stage: "prepare" + + run_unit_tests: + title: Compile/Unit test + stage: prepare + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package + build_app_image: + title: Building Docker Image + type: build + stage: prepare + image_name: kostiscodefresh/spring-actuator-sample-app + working_directory: ./ + tag: '${{CF_BRANCH}}' + dockerfile: Dockerfile + scan_code: + title: Source security scan + stage: verify + image: 'snyk/snyk-cli:maven-3.6.3_java11' + commands: + - snyk monitor + scan_image: + title: Container security scan + stage: verify + image: 'aquasec/trivy' + commands: + - trivy image docker.io/kostiscodefresh/spring-actuator-sample-app:${{CF_BRANCH}} + run_integration_tests: + title: Integration tests + stage: verify + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://my-spring-app -Dsonar.organization=kostis-codefresh-github + services: + composition: + my-spring-app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://my-spring-app:8080/" + sonar_scan: + title: Sonar Scan + stage: verify + image: 'maven:3.8.1-jdk-11-slim' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository sonar:sonar -Dsonar.login=${{SONAR_TOKEN}} -Dsonar.host.url=https://sonarcloud.io -Dsonar.organization=kostis-codefresh-github + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/unlimited-test-environments-manifests" + revision: main + stage: "deploy" + deploy: + title: Deploying Helm Chart + type: helm + stage: deploy + working_directory: ./unlimited-test-environments-manifests + arguments: + action: install + chart_name: simple-java-app + release_name: my-spring-app + helm_version: 3.2.4 + kube_context: myawscluster + namespace: ${{CF_BRANCH_TAG_NORMALIZED}} + cmd_ps: '--create-namespace --wait --timeout 5m' + custom_values: + - 'image_tag=${{CF_BRANCH_TAG_NORMALIZED}}' + - 'replicaCount=3' + - 'ingress_path=/${{CF_BRANCH_TAG_NORMALIZED}}/' + add_pr_comment: + title: Adding comment on PR + stage: deploy + type: kostis-codefresh/github-pr-comment + fail_fast: false + arguments: + PR_COMMENT_TEXT: "[CI] Staging environment is at https://kostis.sales-dev.codefresh.io/${{CF_BRANCH_TAG_NORMALIZED}}/" + GIT_PROVIDER_NAME: 'github-1' + run_smoke_tests: + title: Smoke tests + stage: deploy + image: maven:3.5.2-jdk-8-alpine + working_directory: "${{main_clone}}" + fail_fast: false + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=https://kostis.sales-dev.codefresh.io/${{CF_BRANCH_TAG_NORMALIZED}}/ -Dserver.port=443 +{% endraw %} +{% endhighlight %} + +The end result of the pipeline is a deployment to the path that has the same name as the PR branch. 
For +example, if my branch is named `demo`, then a `demo` namespace is created on the cluster and the application +is exposed on the `/demo/` context: + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/demo-path.png" +url="/images/guides/preview-environments/demo-path.png" +alt="Temporary environment" +caption="Temporary environment" +max-width="100%" +%} + +The environment is also mentioned as a comment in the PR UI in GitHub: + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/pull-request-comment.png" +url="/images/guides/preview-environments/pull-request-comment.png" +alt="Pull Request comment" +caption="Pull Request comment" +max-width="100%" +%} + +As explained in [pull Requests]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/), we want to make this pipeline applicable only +to a PR-open event and PR-sync events that capture commits on an existing pull request. + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/pr-events.png" +url="/images/guides/preview-environments/pr-events.png" +alt="Git events for a Pull Request preview pipeline" +caption="Git events for a Pull Request preview pipeline" +max-width="100%" +%} + +Therefore, you need to set up your [pipeline triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) with the same options selected as shown in the picture above. + +## Cleaning up temporary environments + +Creating temporary environments is very convenient for developers, but can be very costly for your infrastructure if you use a cloud +provider for your cluster. For cost reasons and better resource utilization, it is best to destroy temporary environments that are no longer used. + +While you can run a batch job that automatically deletes old temporary environments, the optimal approach is to delete them as soon as the respective PR is closed. + +We can do that with a very simple pipeline that has only one step: + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/pull-request-closed-pipeline.png" +url="/images/guides/preview-environments/pull-request-closed-pipeline.png" +alt="Pipeline when a Pull Request is closed" +caption="Pipeline when a Pull Request is closed" +max-width="100%" +%} + +Here is the pipeline definition: + + `codefresh-close.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + delete_app: + title: Delete app + type: helm + arguments: + action: auth + helm_version: 3.2.4 + kube_context: myawscluster + namespace: ${{CF_BRANCH_TAG_NORMALIZED}} + commands: + - helm delete my-spring-app --namespace ${{CF_BRANCH_TAG_NORMALIZED}} + - kubectl delete namespace ${{CF_BRANCH_TAG_NORMALIZED}} +{% endraw %} +{% endhighlight %} + +The pipeline just uninstalls the Helm release for that namespace, and then deletes the namespace itself. + +To have this pipeline run only when a PR is closed, here are the [triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) to select: + +{% include image.html +lightbox="true" +file="/images/guides/preview-environments/close-events.png" +url="/images/guides/preview-environments/close-events.png" +alt="Git events for a Pull Request close pipeline" +caption="Git events for a Pull Request close pipeline" +max-width="100%" +%} + +With this setup, the pipeline runs when the PR is closed, regardless of whether it was merged or not (which is exactly what you want as in both cases the test environment is not needed anymore). 
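+
+If you want an extra safety net on top of the PR-close pipeline, you can also schedule a small cleanup pipeline that removes any preview namespaces left behind. The sketch below assumes that preview namespaces receive a label such as `purpose=pr-preview` when they are created; the label and the cluster context are illustrative:
+
+ `codefresh-cleanup.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  cleanup_previews:
+    title: Delete stale preview namespaces
+    type: helm
+    arguments:
+      action: auth
+      helm_version: 3.2.4
+      kube_context: myawscluster
+    commands:
+      # Removes every namespace that carries the (hypothetical) preview label
+      - kubectl delete namespace -l purpose=pr-preview
+{% endraw %}
+{% endhighlight %}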
+ +## Viewing all environments in the Codefresh UI + +You can combine the pipeline above with any Codefresh UI dashboard if you want to see all your temporary environments in a single view. + +For more information, see: +* [Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +* [Helm promotion dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) +* [GitOps dashboard]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/#working-with-the-gitops-dashboard) + + + +## Related articles +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Working with Docker registries]({{site.baseurl}}/docs/integrations/docker-registries/) + + + + + + diff --git a/_docs/ci-cd-guides/progressive-delivery.md b/_docs/ci-cd-guides/progressive-delivery.md new file mode 100644 index 000000000..11780fdce --- /dev/null +++ b/_docs/ci-cd-guides/progressive-delivery.md @@ -0,0 +1,958 @@ +--- +title: "Progressive Delivery" +description: "Perform zero downtime deployments with Argo Rollouts" +group: ci-cd-guides +toc: true +--- + +Progressive Delivery is the practice of deploying an application in a gradual manner, allowing for minimum downtime and easy rollbacks. There are several forms of progressive delivery such as blue/green, canary, a/b, and feature flags. + +Codefresh can easily integrate with [Argo Rollouts](https://argoproj.github.io/argo-rollouts/){:target="\_blank"}, a Kubernetes operator that natively supports deployment practices for progressive delivery. + +## Installing the Argo Rollouts operator to your cluster + +To install Argo Rollouts, follow the [installation instructions](https://argoproj.github.io/argo-rollouts/installation/){:target="\_blank"}. Essentially, you need a terminal with `kubectl` access to your cluster. + +``` +kubectl create namespace argo-rollouts +kubectl apply -n argo-rollouts -f https://raw.githubusercontent.com/argoproj/argo-rollouts/stable/manifests/install.yaml +``` + +You can optionally install the [CLI locally](https://github.com/argoproj/argo-rollouts/releases/latest){:target="\_blank"}, if you want to have more visibility in your deployments. + + +## Blue/Green deployments + +Blue/Green deployments are one of the simplest ways to minimize deployment downtime. Blue/Green deployments are not specific to Kubernetes, and can be used even for traditional applications that reside on Virtual Machines. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/how-blue-green-works.png" +url="/images/guides/progressive-delivery/how-blue-green-works.png" +alt="Blue/Green Deployments" +caption="Blue/Green Deployments" +max-width="50%" +%} + +1. At first all users of the application are routed to the current version (shown in blue). A key point is that all traffic passes from a load balancer. +1. A new version is deployed (shown in green). As this version does not receive any live traffic, all users are still served by the previous/stable version. +1. Developers can test internally the new green version, and verify its validity. If it is valid, traffic is switched to that new version. +1. If everything goes well, the old version is completely discarded. We are back to the initial state (order of colors does not matter). 
+ +The major benefit of this pattern is that if at any point in time the new version has issues, all users can be switched back to the previous version (via the load balancer). Switching via the load balancer is much faster than redeploying a new version, resulting in minimum disruption for existing users. + +There are several variations of this pattern. In some cases, the old color is never destroyed but keeps running in the background. You can also retain even older versions online, maybe with a smaller footprint, allowing for easy switching to any previous application revision. + +### Blue/Green Kubernetes Deployment with Argo Rollouts + +Even though Argo Rollouts supports the basic blue/green pattern described in the previous section, it also offers a wealth of [customization options](https://argoproj.github.io/argo-rollouts/features/bluegreen/){:target="\_blank"}. +One of the most important additions is the ability to "test" the upcoming color by introducing a "preview" [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/){:target="\_blank"}, in addition to the service used for live traffic. +This preview service can be used by the team that performs the deployment to verify the new version before actually switching the traffic. + + +Here is the initial state of a deployment. The example uses two pods (shown as `xnsdx` and `jftql` in the diagram). + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/01_initial.png" +url="/images/guides/progressive-delivery/01_initial.png" +alt="Initial deployment. All services point to active version" +caption="Initial deployment. All services point to active version" +max-width="90%" +%} + +There are two Kubernetes services: +* A `rollout-blue-gree-active` service that captures all live traffic from actual users of the application (internet traffic coming from `51.141.221.40`). +* A secondary service called `rollout-bluegreen-preview`. Under normal circumstances it also points to the same live version. + + +Once a deployment starts, a new "color" is created. In the example we have two new pods that represent the next version of the application to be deployed (shown as `9t67t` and `7vs2m`). + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/02_two_colors.png" +url="/images/guides/progressive-delivery/02_two_colors.png" +alt="Deployment in progress. Active users see old version. Internal users preview new version" +caption="Deployment in progress. Active users see old version. Internal users preview new version" +max-width="90%" +%} + +The important point here is the fact that the normal "active" service still points to the old version, while the "preview" service points to the new pods. This means that all active users are still on the old/stable deployment, while internal teams can use the "preview" service to test the new deployment. + +If everything goes well, the next version is promoted to be the active version. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/03_switch_traffic.png" +url="/images/guides/progressive-delivery/03_switch_traffic.png" +alt="Next application version is promoted. All users see new version" +caption="Next application version is promoted. All users see new version" +max-width="90%" +%} + +Here both services point to the new version. This is also the critical moment for all actual users of the application, as they are now switched to use the new version of the application. 
The old version is still around but no traffic is sent to it. + +Having the old version around is a great failsafe, as one can abort the deployment process and switch back all active users to the old deployment in the fastest way possible. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/04_scale_down.png" +url="/images/guides/progressive-delivery/04_scale_down.png" +alt="Old application version is discarded. Only new version remains." +caption="Old application version is discarded. Only new version remains." +max-width="90%" +%} + +After the configured duration, as [defined in Argo Rollouts](https://argoproj.github.io/argo-rollouts/features/bluegreen/#scaledowndelayseconds){:target="\_blank"}, the old version is scaled down completely to preserve resources. We are now back +to the same configuration as the initial state, and the next deployment will follow the same sequence of events. + + +### Example application + +You can find an example application at [https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app](https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app){:target="\_blank"}, that also includes simple integration tests. + +Notice that the first deployment of your application will NOT follow the blue/green deployment process as there is no "previous" color. So you need to deploy it at least once. + +``` +git clone https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app.git +cd argo-rollout-blue-green-sample-app +kubectl create ns blue-green +kubectl apply -f ./blue-green-manual-approval -n blue-green +``` + +You can then monitor what Argo Rollouts is doing with the following command: + +``` +kubectl argo rollouts get rollout spring-sample-app-deployment --watch -n blue-green +``` + +### Blue/Green deployment with manual approval + +A quick way to use blue/green deployments is by simply inserting [an approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) before the traffic switch step. +This will pause the pipeline and the developers or QA can test the next version on their own before any live users are redirected to it. + +Here is an example pipeline: + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/approval-pipeline.png" +url="/images/guides/progressive-delivery/approval-pipeline.png" +alt="Manual approval before traffic switch" +caption="Manual approval before traffic switch" +max-width="100%" +%} + +This pipeline does the following: + +1. [Clones]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) the source code of the application. +1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image. +1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest, and creates a new "color" for the next version +1. The pipeline is paused and waits for an [approval/rejection]({{site.baseurl}}/docs/pipelines/steps/approval/#getting-the-approval-result) by a human user. +1. If the pipeline is approved, the new color is promoted, and becomes the new active version. +1. If the pipeline is rejected, the new color is discarded, and all live users are not affected in any way. 
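+
+Behind the scenes, the `blue-green-manual-approval` folder of the example application contains a Rollout manifest that wires the active and preview services together. The following is only a minimal sketch of such a manifest; the service, label, and container names are illustrative and the actual file in the repository may differ:
+
+`rollout.yaml`
+{% highlight yaml %}
+{% raw %}
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+  name: spring-sample-app-deployment
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: spring-sample-app
+  template:
+    metadata:
+      labels:
+        app: spring-sample-app
+    spec:
+      containers:
+        - name: spring-sample-app
+          image: kostiscodefresh/argo-rollouts-blue-green-sample-app:latest
+  strategy:
+    blueGreen:
+      # Service that receives live traffic
+      activeService: rollout-bluegreen-active
+      # Service used internally to test the next color before promotion
+      previewService: rollout-bluegreen-preview
+      # Wait for a pipeline-driven promotion instead of promoting automatically
+      autoPromotionEnabled: false
+      # Keep the old color around for 30 seconds after promotion
+      scaleDownDelaySeconds: 30
+{% endraw %}
+{% endhighlight %}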
+ +Here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - deploy + - finish +steps: + clone: + type: "git-clone" + stage: prepare + description: "Cloning main repository..." + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: "${{CF_BRANCH}}" + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: kostiscodefresh/argo-rollouts-blue-green-sample-app + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile + start_deployment: + title: Deploying new color + stage: deploy + image: codefresh/cf-deploy-kubernetes:master + working_directory: "${{clone}}" + commands: + - /cf-deploy-kubernetes ./blue-green-manual-approval/service.yaml + - /cf-deploy-kubernetes ./blue-green-manual-approval/service-preview.yaml + - /cf-deploy-kubernetes ./blue-green-manual-approval/rollout.yaml + environment: + - KUBECONTEXT=mydemoAkscluster@BizSpark Plus + - KUBERNETES_NAMESPACE=blue-green + wait_for_new_color: + fail_fast: false + type: pending-approval + title: Is the new color ok? + stage: deploy + promote_color: + title: Switching traffic to new color + stage: finish + image: kostiscodefresh/kubectl-argo-rollouts:latest + commands: + - /app/kubectl-argo-rollouts-linux-amd64 promote spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" + when: + steps: + - name: wait_for_new_color + on: + - approved + abort_deployment: + title: Keeping the existing color + stage: finish + image: kostiscodefresh/kubectl-argo-rollouts:latest + commands: + - /app/kubectl-argo-rollouts-linux-amd64 undo spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" + when: + steps: + - name: wait_for_new_color + on: + - denied +{% endraw %} +{% endhighlight %} + +Just before the approval, you can optionally execute the Argo Rollouts CLI to see what is happening behind the scenes: + +``` +kubectl argo rollouts get rollout spring-sample-app-deployment --watch -n blue-green +``` + +It should show the new color come up, but not accepting any traffic. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/monitor-argo-rollouts.png" +url="/images/guides/progressive-delivery/monitor-argo-rollouts.png" +alt="Argo Rollouts CLI" +caption="Argo Rollouts CLI" +max-width="100%" +%} + +Once the deployment is complete, the old pods are destroyed after 30 seconds (this is the default value of Argo Rollouts). + + + +### Blue/Green deployment with smoke tests + +Using manual approval before promoting the new version is a great starting point. To truly achieve continuous deployment, one should automate the testing process and eliminate the human approval. + +There are many approaches on testing a release, and each organization will have a different set of "tests" that verify the next version of the software. Argo Rollouts +has [several integrations](https://argoproj.github.io/argo-rollouts/features/analysis/){:target="\_blank"} either with metric providers or [simple Kubernetes jobs](https://argoproj.github.io/argo-rollouts/analysis/job/){:target="\_blank"} that can run integration tests or collect metrics and decide if the next color should be promoted or not. + +Another alternative is to simply execute [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) from within Codefresh. 
This is great if your integration tests need access to the source code or other external services that are accessible only to Codefresh. + +We can modify the previous pipeline to include automated smoke tests that are already part of the [example application](https://github.com/codefresh-contrib/argo-rollout-blue-green-sample-app/blob/main/src/test/java/sample/actuator/HealthIT.java){:target="\_blank"}. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/smoke-tests-pipeline.png" +url="/images/guides/progressive-delivery/smoke-tests-pipeline.png" +alt="Smoke tests before traffic switch" +caption="Smoke tests before traffic switch" +max-width="100%" +%} + +This pipeline does the following: + +1. [Clones]({{site.baseurl}}/docs/examples/example-catalog/ci-examples/git-checkout/) the source code of the application. +1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image +1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest and creates a new "color" for the next version. +1. Runs integration tests against the "preview" service created by Argo Rollouts. Live users are still on the previous/stable version of the application. +1. If smoke tests pass, the new color is promoted and becomes the new active version. +1. If smoke tests fail, the new color is discarded and all live users are not affected in any way. + +Here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - deploy + - finish +steps: + clone: + type: "git-clone" + stage: prepare + description: "Cloning main repository..." 
+ repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: "${{CF_BRANCH}}" + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: kostiscodefresh/argo-rollouts-blue-green-sample-app + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile + start_deployment: + title: Deploying new color + stage: deploy + image: codefresh/cf-deploy-kubernetes:master + working_directory: "${{clone}}" + commands: + - /cf-deploy-kubernetes ./blue-green-manual-approval/service.yaml + - /cf-deploy-kubernetes ./blue-green-manual-approval/service-preview.yaml + - /cf-deploy-kubernetes ./blue-green-manual-approval/rollout.yaml + environment: + - KUBECONTEXT=mydemoAkscluster@BizSpark Plus + - KUBERNETES_NAMESPACE=blue-green + run_integration_tests: + title: Smoke tests + stage: deploy + image: maven:3.5.2-jdk-8-alpine + working_directory: "${{clone}}" + fail_fast: false + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://13.86.102.74 -Dserver.port=80 + promote_color: + title: Switching traffic to new color + stage: finish + image: kostiscodefresh/kubectl-argo-rollouts:latest + commands: + - /app/kubectl-argo-rollouts-linux-amd64 promote spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" + when: + steps: + - name: run_integration_tests + on: + - success + abort_deployment: + title: Keeping the existing color + stage: finish + image: kostiscodefresh/kubectl-argo-rollouts:latest + commands: + - /app/kubectl-argo-rollouts-linux-amd64 undo spring-sample-app-deployment -n blue-green --context "mydemoAkscluster@BizSpark Plus" + when: + steps: + - name: run_integration_tests + on: + - failure +{% endraw %} +{% endhighlight %} + +You can optionally execute the Argo Rollouts CLI to see what is happening behind the scenes: + +``` +kubectl argo rollouts get rollout spring-sample-app-deployment --watch -n blue-green +``` + +>For the sake of simplicity, we have hardcoded the load balancer for the preview service at 13.86.102.74. For an actual application, you would have a DNS name such as `preview.example.com`, or use another `kubectl command` to fetch the endpoint of the load balancer dynamically. Also, our integration tests assume that the application is already deployed, before they run. If your application takes too much time to deploy, you need to make sure that it is up before the tests actually run. + + +The end result is a continuous deployment pipeline, where all release candidates that don't pass tests never reach production. + +## Canary deployments + +Blue/Green deployments are great for minimizing downtime after a deployment, but they are not perfect. If your new version has a hidden issue that manifests itself only after some time (i.e. it is not detected by your smoke tests), then **all** your users will be affected, because the traffic switch is all or nothing. + +An improved deployment method is canary deployments. These function similar to blue/green, but instead of switching 100% of live traffic all at once to the new version, you can instead move only a subset of users. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/how-canary-deployments-work.png" +url="/images/guides/progressive-delivery/how-canary-deployments-work.png" +alt="Canary Deployments" +caption="Canary Deployments" +max-width="50%" +%} + +1. At the start, all users of the application are routed to the current version (shown in blue). 
A key point is that all traffic passes through a load balancer.
+1. A new version is deployed (shown in green). This version gets only a very small amount of live traffic (for example 10%).
+1. Developers can test internally and monitor their metrics to verify the new release. If they are confident, they can redirect more traffic to the new version (for example 33%).
+1. If everything goes well, the old version is completely discarded. All traffic is now redirected to the new version. We are back to the initial state (order of colors does not matter).
+
+The major benefit of this pattern is that if at any point in time the new version has issues, only a small subset of live users are affected. And like blue/green deployments, performing a rollback is as easy as resetting the load balancer to send no traffic to the canary version. Switching the load balancer is much faster than redeploying a new version, resulting in minimum disruption for existing users.
+
+There are several variations of this pattern. The amount of live traffic that you send to the canary at each step, as well as the number of steps, are user configurable. A simple approach would have just two steps (10%, 100%), while a more complex one could move traffic in a gradual way (10%, 30%, 60%, 90%, 100%).
+
+>Canary deployments are more advanced than blue/green deployments, and are also more complex to set up. The load balancer is now much smarter, as it can handle two streams of traffic at the same time with different destinations of different weights. You also need a way (usually an API) to instruct the load balancer to change the weight distribution of the traffic streams. If you are just getting started with progressive delivery, we suggest you master blue/green deployments first, before adopting canaries.
+
+### Canary Deployment with Argo Rollouts
+
+Argo Rollouts supports the basic canary pattern described in the previous section, and also offers a wealth of [customization options](https://argoproj.github.io/argo-rollouts/features/canary/){:target="\_blank"}.
+One of the most important
+additions is the ability to "test" the upcoming version by introducing a "preview" [Kubernetes service](https://kubernetes.io/docs/concepts/services-networking/service/){:target="\_blank"}, in addition to the service used for live traffic.
+This preview service can be used by the team that performs the deployment to verify the new version as it gets used by the subset of live users.
+
+
+Here is the initial state of a deployment. The example uses four pods (shown as `22nqx`, `nqksq`, `8bzwh` and `jtdcc` in the diagram).
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/01_canary_initial_state.png"
+url="/images/guides/progressive-delivery/01_canary_initial_state.png"
+alt="Initial deployment. All services point to active version"
+caption="Initial deployment. All services point to active version"
+max-width="90%"
+%}
+
+There are now three Kubernetes services:
+* The `rollout-canary-all-traffic` service that captures all live traffic from actual users of the application (internet traffic coming from `20.37.135.240`).
+* A secondary service, `rollout-canary-active`, that always points to the stable/previous version of the software.
+* A third service, `rollout-canary-preview`, that only routes traffic to the canary/new version.
+
+In normal circumstances all three services point to the same version.
+
+
+Once a deployment starts, a new "version" is created.
In the example we have one new pod that represents the next version of the application to be deployed (shown as `9wx8w` at the top of the diagram).
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/02_canary_10.png"
+url="/images/guides/progressive-delivery/02_canary_10.png"
+alt="Deployment in progress. 10% of users are sent to the canary version"
+caption="Deployment in progress. 10% of users are sent to the canary version"
+max-width="90%"
+%}
+
+The important point here is the fact that the service used by live users (called `rollout-canary-all-traffic`) routes traffic to **both** the canary and the previous version. It is not visible in the diagram, but only 10% of traffic is sent to the single pod that hosts the new version, while 90% of traffic goes to the four pods of the old version.
+
+The `rollout-canary-preview` service goes only to the canary pod. You can use this service to examine metrics from the canary or even give it to users who always want to try the new version first (e.g. your internal developers). On the other hand, the `rollout-canary-active` service always goes to the stable version. You can use that for users who never want to try the new version first or for verifying how something worked in the previous version.
+
+
+
+If everything goes well, and you are happy with how the canary works, you can redirect some more traffic to it.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/03_canary_33.png"
+url="/images/guides/progressive-delivery/03_canary_33.png"
+alt="Deployment in progress. 33% of users are sent to the canary version"
+caption="Deployment in progress. 33% of users are sent to the canary version"
+max-width="90%"
+%}
+
+We are now sending 33% of live traffic to the canary (the traffic weights are not visible in the picture). To accommodate the extra traffic, the canary version now has two pods instead of one. This is another great feature of Argo Rollouts: the number of pods in the canary is completely unrelated to the amount of traffic that you send to it. You can have all possible combinations that you can think of (e.g. 10% of traffic to five pods, or 50% of traffic to three pods and so on). It all depends on the resources used by your application.
+
+It makes sense of course to gradually increase the number of pods in the canary as you shift more traffic to it.
+
+Having the old version around is a great failsafe, as one can abort the deployment process and switch back all active users to the old deployment in the fastest way possible
+by simply telling the load balancer to move 100% of traffic back to the previous version.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/04_canary_finished.png"
+url="/images/guides/progressive-delivery/04_canary_finished.png"
+alt="Old application version is discarded. Only new version remains."
+caption="Old application version is discarded. Only new version remains."
+max-width="90%"
+%}
+
+Two more pods are launched for the canary (for a total of four), and finally we can shift 100% of live traffic to it. After some time, the old version is scaled down completely to preserve resources. We are now back
+to the same configuration as the initial state, and the next deployment will follow the same sequence of events. 
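+
+If you are experimenting with a canary manually, the Argo Rollouts CLI can move the rollout forward or roll it back at any point. A minimal sketch, assuming the Rollout named `golang-sample-app-deployment` in the `canary` namespace from the example application shown in the next section:
+
+```
+# Advance the rollout to its next step (e.g. from 10% to 33% of traffic)
+kubectl argo rollouts promote golang-sample-app-deployment -n canary
+
+# Abort the rollout and send all live traffic back to the stable version
+kubectl argo rollouts abort golang-sample-app-deployment -n canary
+```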
+ +### Example application + +You can find an example application at [https://github.com/codefresh-contrib/argo-rollout-canary-sample-app](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app){:target="\_blank"} that also includes simple metrics (we will use them in the second example with canaries). + +Notice that the first deployment of your application will NOT follow the canary deployment process as there is no "previous" version. So you need to deploy it at least +once. + +``` +git clone https://github.com/codefresh-contrib/argo-rollout-canary-sample-app.git +cd argo-rollout-canary-sample-app +kubectl create ns canary +kubectl apply -f ./canary-manual-approval -n canary +``` + +You can then monitor what argo rollouts is doing with the following command: + +``` +kubectl argo rollouts get rollout golang-sample-app-deployment --watch -n canary +``` + +### Choosing a solution for Traffic Management + +Unlike Blue/Green deployments, canary deployments require a smarter way to handle incoming traffic to your application. Specifically for Kubernetes, you need a networking solution that can split traffic according to percentages. Kubernetes on its own performs simple load balancing where the number of pods affects the traffic they get. But that is not enough for canary deployments. + +Argo Rollouts has [several integrations](https://argoproj.github.io/argo-rollouts/features/traffic-management/){:target="\_blank"} with Service Meshes and ingresses that can be used for Traffic Splits. + +Apart from the platforms that are supported natively by Argo Rollouts, you can also use any solution that implements the [Service Mesh Interface (SMI)](https://smi-spec.io/){:target="\_blank"}, a common +standard for service mesh implementations. Argo Rollouts [adheres to the SMI spec](https://argoproj.github.io/argo-rollouts/features/traffic-management/smi/){:target="\_blank"}, and can instruct any compliant solution for the traffic split process during canaries. + +In our example we are using [LinkerD](https://linkerd.io/){:target="\_blank"}, an open source service mesh solution for Kubernetes that also implements SMI. +You can install LinkerD by following [the official documentation](https://linkerd.io/2.10/getting-started/){:target="\_blank"} in your cluster and then making sure that your application is [meshed](https://linkerd.io/2.10/tasks/adding-your-service/){:target="\_blank"} (i.e. it is managed by LinkerD) by adding the special annotation [linkerd.io/inject:enabled](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/canary-manual-approval/rollout.yaml#L36){:target="\_blank"} in the rollout YAML. + + +### Canary deployment with manual approval + +As with Blue/Green deployments, the easiest way to use canaries is by simply inserting [an approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) before each subsequent traffic switch step. +This will pause the pipeline and the developers or QA team can evaluate the canary stability. 
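+
+A minimal sketch of such an approval step is shown below (the step name and title are illustrative; the full pipeline later in this section uses the same `pending-approval` step type):
+
+```yaml
+  check_canary:
+    type: pending-approval
+    title: Is the canary healthy?
+    fail_fast: false
+```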
+ +Here is the [Canary setup](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/canary-manual-approval/rollout.yaml#L8){:target="\_blank"}: + +`rollout.yaml` (excerpt) +```yaml +spec: + replicas: 4 + strategy: + canary: + canaryService: rollout-canary-preview + stableService: rollout-canary-active + trafficRouting: + smi: + trafficSplitName: rollout-example-traffic-split + rootService: rollout-canary-all-traffic + steps: + - setWeight: 10 + - setCanaryScale: + weight: 25 + - pause: {} + - setWeight: 33 + - setCanaryScale: + weight: 50 + - pause: {} +``` + +The canary has essentially three stages. At the beginning, it gets only 10% of the traffic and then it stops. At this point it creates 1/4 of pods. Then +if we promote it, it gets 33% of the traffic and is now scaled up to 1/2 the number of pods constituting a full deployment. We pause again and then finally it gets 100% of +live traffic. + + +Here is the pipeline with canary steps: + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/canary-manual-approval-pipeline.png" +url="/images/guides/progressive-delivery/canary-manual-approval-pipeline.png" +alt="Manual approval with two intermediate canary steps" +caption="Manual approval with two intermediate canary steps" +max-width="100%" +%} + +This pipeline does the following: + +1. [Clones]({{site.baseurl}}/docs/example-catalog/examples/git-checkout/) the source code of the application. +1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image +1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest and creates a new version. 10% of live traffic is redirected to it. +1. The pipeline is paused and waits for an [approval/rejection]({{site.baseurl}}/docs/pipelines/steps/approval/#getting-the-approval-result) by a human user. +1. If the pipeline is approved, 33% of traffic is now sent to the canary. If the pipeline is rejected, the canary is discarded and all traffic goes back to the stable version. +1. In the next pause, the pipeline waits for a second approval. +1. If the pipeline is approved, all traffic is now sent to the canary. If the pipeline is rejected, the canary is discarded and all traffic goes back to the stable version. + +Here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - 'canary 10%' + - 'canary 33%' + - finish +steps: + clone: + type: "git-clone" + stage: prepare + description: "Cloning main repository..." 
+    repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
+    revision: "${{CF_BRANCH}}"
+  build_app_image:
+    title: Building Docker Image
+    type: build
+    stage: build
+    image_name: kostiscodefresh/argo-rollouts-canary-sample-app
+    working_directory: "${{clone}}"
+    tags:
+    - "latest"
+    - '${{CF_SHORT_REVISION}}'
+    dockerfile: Dockerfile
+  start_deployment:
+    title: Deploy to 10% of live traffic
+    stage: 'canary 10%'
+    image: codefresh/cf-deploy-kubernetes:master
+    working_directory: "${{clone}}"
+    commands:
+      - /cf-deploy-kubernetes ./canary-manual-approval/service.yaml
+      - /cf-deploy-kubernetes ./canary-manual-approval/service-preview.yaml
+      - /cf-deploy-kubernetes ./canary-manual-approval/service-all.yaml
+      - /cf-deploy-kubernetes ./canary-manual-approval/rollout.yaml
+    environment:
+      - KUBECONTEXT=mydemoAkscluster@BizSpark Plus
+      - KUBERNETES_NAMESPACE=canary
+  check_canary_10:
+    fail_fast: false
+    type: pending-approval
+    title: Is canary ok?
+    stage: 'canary 10%'
+  promote_canary_33:
+    title: Switching 33% traffic to canary
+    stage: 'canary 33%'
+    image: kostiscodefresh/kubectl-argo-rollouts:latest
+    commands:
+      - /app/kubectl-argo-rollouts-linux-amd64 promote golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus"
+    when:
+      steps:
+      - name: check_canary_10
+        on:
+        - approved
+  abort_deployment_10:
+    title: Discarding canary at 10%
+    stage: 'canary 10%'
+    image: kostiscodefresh/kubectl-argo-rollouts:latest
+    commands:
+      - /app/kubectl-argo-rollouts-linux-amd64 undo golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus"
+    when:
+      steps:
+      - name: check_canary_10
+        on:
+        - denied
+  exit_10:
+    title: Stopping pipeline
+    stage: 'canary 10%'
+    image: alpine:3.9
+    commands:
+      - echo "Canary failed"
+      - exit 1
+    when:
+      steps:
+      - name: check_canary_10
+        on:
+        - denied
+  check_canary_33:
+    fail_fast: false
+    type: pending-approval
+    title: Is canary ok?
+    stage: 'canary 33%'
+  promote_canary_full:
+    title: Switching all traffic to canary
+    stage: finish
+    image: kostiscodefresh/kubectl-argo-rollouts:latest
+    commands:
+      - /app/kubectl-argo-rollouts-linux-amd64 promote golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus"
+    when:
+      steps:
+      - name: check_canary_33
+        on:
+        - approved
+  abort_deployment_33:
+    title: Discarding canary at 33%
+    stage: 'canary 33%'
+    image: kostiscodefresh/kubectl-argo-rollouts:latest
+    commands:
+      - /app/kubectl-argo-rollouts-linux-amd64 undo golang-sample-app-deployment -n canary --context "mydemoAkscluster@BizSpark Plus"
+    when:
+      steps:
+      - name: check_canary_33
+        on:
+        - denied
+  exit_33:
+    title: Stopping pipeline
+    stage: 'canary 33%'
+    image: alpine:3.9
+    commands:
+      - echo "Canary failed"
+      - exit 1
+    when:
+      steps:
+      - name: check_canary_33
+        on:
+        - denied
+{% endraw %}
+{% endhighlight %}
+
+Just before the approval, you can optionally execute the Argo Rollouts CLI to see what is happening behind the scenes:
+
+```
+kubectl argo rollouts get rollout golang-sample-app-deployment --watch -n canary
+```
+
+It should show the status of the canary pods along with the amount of traffic that is redirected to them.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/canary-watch.png"
+url="/images/guides/progressive-delivery/canary-watch.png"
+alt="Argo Rollouts CLI"
+caption="Argo Rollouts CLI"
+max-width="100%"
+%}
+
+In the above picture, the canary deployment has just started. There is only one pod for the canary that gets 10% of live traffic. 
The four pods of the previous version still receive 90% of live traffic.
+
+You can also see the traffic split in the [LinkerD Dashboard](https://linkerd.io/2.10/reference/architecture/#dashboard){:target="\_blank"}:
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/canary-traffic-split.png"
+url="/images/guides/progressive-delivery/canary-traffic-split.png"
+alt="Linkerd Traffic split details"
+caption="Linkerd Traffic split details"
+max-width="80%"
+%}
+
+The screenshot above is from the second stage of the canary where 33% of live traffic is redirected to the canary pods.
+You can also get the same information from the command line with `kubectl get trafficsplit`.
+
+### Choosing a solution for automated metric analysis
+
+Canary deployments with manual pauses are great for getting started, but can quickly become cumbersome and error-prone. Ideally, the canary should automatically promote itself if the application "looks good". One of the most straightforward ways to examine application health is by reading its metrics and deciding on the progress of the canary in a completely automated way.
+
+There are two main sources of metrics that you can use:
+
+1. Application-specific metrics. This requires instrumentation in your application but is very powerful as you can query exactly what you want.
+1. Cluster-level metrics (i.e. from the service mesh). These are very easy to set up, but are generic and deal mostly with the traffic the application receives.
+
+
+Argo Rollouts has native integration for [several metric providers](https://argoproj.github.io/argo-rollouts/features/analysis/){:target="\_blank"}. We will use Prometheus in our example.
+The example application [is already instrumented](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/main.go#L51){:target="\_blank"} to expose some basic metrics.
+
+First, you need to install Prometheus by following [the official documentation](https://prometheus.io/docs/prometheus/latest/installation/){:target="\_blank"}. Then you need to make sure that Prometheus will actually scrape your application. Prometheus has [native service discovery for Kubernetes](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config){:target="\_blank"} but you need to enable it in the configuration.
+
+If you [install Prometheus with the Helm chart](https://github.com/prometheus-community/helm-charts){:target="\_blank"}, Kubernetes service discovery is already enabled. The only thing to set up is to add the `prometheus.io/scrape: "true"` annotation in your rollout so that Prometheus does not ignore your application.
+
+You can optionally install [Grafana](https://grafana.com/){:target="\_blank"} so that you can inspect your application metrics before using them in the canary process. The example application has a [basic dashboard](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/graphana/graphana-dashboard.json){:target="\_blank"}
+that you can import:
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/graphana-dashboard.png"
+url="/images/guides/progressive-delivery/graphana-dashboard.png"
+alt="Prometheus metrics from the application"
+caption="Prometheus metrics from the application"
+max-width="90%"
+%}
+
+Next you need a way to filter your metrics so that you can query only those from the canary pods and not the stable pods. 
There are many ways to do this, but the easiest one is to simply have Argo Rollouts put special labels/tags in the canary pods. Then you can write any Prometheus query and focus only on the canary instances:
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/canary-metrics.png"
+url="/images/guides/progressive-delivery/canary-metrics.png"
+alt="Canary metrics during a deployment"
+caption="Canary metrics during a deployment"
+max-width="100%"
+%}
+
+For the decision on how to promote the canary, you need to examine your application and decide which metrics you consider representative of the health of the application.
+For our example we have a simple query that checks the number of successful calls (i.e. those that return HTTP code 200) against the total number of calls. Anything below 100% means that the application has calls that return errors.
+
+{% include image.html
+lightbox="true"
+file="/images/guides/progressive-delivery/canary-decision.png"
+url="/images/guides/progressive-delivery/canary-decision.png"
+alt="The query that will promote or cancel the canary"
+caption="The query that will promote or cancel the canary"
+max-width="100%"
+%}
+
+Note that Argo Rollouts can evaluate multiple queries when deciding if the canary is healthy or not. You are not constrained to a single query.
+
+
+### Canary deployment with metric evaluation
+
+Once you have your metric solution in place, you need to instruct Argo Rollouts to use it during a deployment.
+
+This happens with an [Analysis CRD](https://github.com/codefresh-contrib/argo-rollout-canary-sample-app/blob/main/canary-with-metrics/analysis.yaml){:target="\_blank"}.
+
+`analysis.yaml`
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: AnalysisTemplate
+metadata:
+  name: success-rate
+spec:
+  args:
+  - name: service-name
+  metrics:
+  - name: success-rate
+    interval: 2m
+    count: 2
+    # NOTE: prometheus queries return results in the form of a vector.
+    # So it is common to access the index 0 of the returned array to obtain the value
+    successCondition: result[0] >= 0.95
+    provider:
+      prometheus:
+        address: http://prom-release-prometheus-server.prom.svc.cluster.local:80
+        query: sum(response_status{app="{{args.service-name}}",role="canary",status=~"2.*"})/sum(response_status{app="{{args.service-name}}",role="canary"})
+```
+
+This Analysis template instructs Argo Rollouts to contact the internal Prometheus server every two minutes with a query that checks the successful HTTP calls
+to the application. If the percentage of HTTP calls that return 200 is more than 95%, the canary will be promoted. Otherwise the canary will fail.
+
+The Analysis can be reused by multiple deployments, as the name of the service is configurable. The parameter is filled in the Rollout definition. 
+ +`rollout.yaml` (excerpt) +```yaml +spec: + replicas: 4 + strategy: + canary: + canaryService: rollout-canary-preview + stableService: rollout-canary-active + canaryMetadata: + annotations: + role: canary + labels: + role: canary + trafficRouting: + smi: + trafficSplitName: rollout-example-traffic-split + rootService: rollout-canary-all-traffic + steps: + - setWeight: 10 + - setCanaryScale: + weight: 25 + - pause: {duration: 5m} + - setWeight: 33 + - setCanaryScale: + weight: 50 + - pause: {duration: 5m} + analysis: + templates: + - templateName: success-rate + startingStep: 4 # delay starting analysis run until setWeight: 10% + args: + - name: service-name + value: golang-sample-app +``` + +Here you can see that instead of waiting for ever after each canary step, we instead wait for 5 minutes at 10% of traffic and 5 more minutes at 50% of traffic. During that time the Prometheus Analysis is running automatically behind the scenes. + +The Codefresh pipeline is now very simple: + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/canary-metrics-pipeline.png" +url="/images/guides/progressive-delivery/canary-metrics-pipeline.png" +alt="Fully automated Canary pipeline" +caption="Fully automated Canary pipeline" +max-width="100%" +%} + +This pipeline does the following: + +1. [Clones]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) the source code of the application. +1. [Builds]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) a Docker image. +1. [Deploys]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) the application by updating the Kubernetes manifests. Argo Rollouts sees the new manifest and creates a new version and starts the canary process. + +Here is the pipeline definition: For more information, see [What is the Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - deploy +steps: + clone: + type: "git-clone" + stage: prepare + description: "Cloning main repository..." + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: "${{CF_BRANCH}}" + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: kostiscodefresh/argo-rollouts-canary-sample-app + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile + build_arguments: + - git_hash=${{CF_SHORT_REVISION}} + start_deployment: + title: Start canary + stage: deploy + image: codefresh/cf-deploy-kubernetes:master + working_directory: "${{clone}}" + commands: + - /cf-deploy-kubernetes ./canary-with-metrics/service.yaml + - /cf-deploy-kubernetes ./canary-with-metrics/service-preview.yaml + - /cf-deploy-kubernetes ./canary-with-metrics/service-all.yaml + - /cf-deploy-kubernetes ./canary-with-metrics/analysis.yaml + - /cf-deploy-kubernetes ./canary-with-metrics/rollout.yaml + environment: + - KUBECONTEXT=mydemoAkscluster@BizSpark Plus + - KUBERNETES_NAMESPACE=canary +{% endraw %} +{% endhighlight %} + +The pipeline is very simple because Argo Rollouts does all the heavy lifting behind the scenes. 
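+
+If you want to inspect the metric evaluation itself, every run of the Analysis template is stored as an `AnalysisRun` resource in the cluster. As a quick sketch (assuming the same `canary` namespace used in this example), you can list and inspect these resources with plain kubectl:
+
+```
+kubectl get analysisrun -n canary
+kubectl describe analysisrun <analysis-run-name> -n canary
+```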
+ +You can see the Analysis running with + +``` +kubectl argo rollouts get rollout golang-sample-app-deployment --watch -n canary +``` + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/canary-watch-metrics.png" +url="/images/guides/progressive-delivery/canary-watch-metrics.png" +alt="Running the Analysis in the background" +caption="Running the Analysis in the background" +max-width="100%" +%} + +For each deployment you can also see the result of the Analysis along with the canary pods. The number next to the checkmark shows how many times the analysis will run (this is defined by the `count` property in the Analysis file). See the [Canary specification](https://argoproj.github.io/argo-rollouts/features/canary/) for more parameters. + +## Monitoring the Argo Rollouts controller + +Regardless of whether you use metric evaluation for your own applications, Argo Rollouts itself exposes Prometheus metrics +for its internal functionality. You can ingest those metrics like any other Prometheus application +and create your own dashboards if you want to get some insights on what the controller is doing. + +You can find an example dashboard at [https://github.com/argoproj/argo-rollouts/blob/master/examples/dashboard.json](https://github.com/argoproj/argo-rollouts/blob/master/examples/dashboard.json){:target="\_blank"} that can be used as a starting point. + +{% include image.html +lightbox="true" +file="/images/guides/progressive-delivery/monitor-rollout.png" +url="/images/guides/progressive-delivery/monitor-rollout.png" +alt="Integrated metrics from the Argo Rollouts controller" +caption="Integrated metrics from the Argo Rollouts controller" +max-width="80%" +%} + + +For more details, see the [metrics documentation page](https://argoproj.github.io/argo-rollouts/features/controller-metrics/){:target="\_blank"}. + +## Using Argo Rollouts with GitOps + +For simplicity reasons, we covered progressive delivery in this page using Argo Rollouts on its own. Argo Rollouts integrates seamlessly with Argo CD bringing together GitOps and Progressive delivery. + +If you use Argo CD and Argo Rollouts together you will also have access to the Codefresh GitOps dashboard to manage your deployments: + +{% include image.html + lightbox="true" + file="/images/guides/gitops/gitops-dashboard.png" + url="/images/guides/gitops/gitops-dashboard.png" + alt="The Codefresh GitOps dashboard" + caption="The Codefresh GitOps dashboard" + max-width="60%" + %} + + +See our [GitOps page]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/) for more details. + + + +## Related articles +[Deploying to predefined environments]({{site.baseurl}}/docs/ci-cd-guides/environment-deployments/) +[GitOps Deployments]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/) +[Pipelines for microservices]({{site.baseurl}}/docs/ci-cd-guides/microservices/) + + + + + diff --git a/_docs/ci-cd-guides/pull-request-branches.md b/_docs/ci-cd-guides/pull-request-branches.md new file mode 100644 index 000000000..1414d7337 --- /dev/null +++ b/_docs/ci-cd-guides/pull-request-branches.md @@ -0,0 +1,354 @@ +--- +title: "Pull requests and branches" +description: "Handle builds for pull requests or other branches" +group: ci-cd-guides +toc: true +--- + +Codefresh has native support for working with different branches and building pull requests. In particular, it has a very rich trigger model that allows you to handle specific events (such as opening a pull request or adding a comment). 
+ +The possible actions can be seen in the trigger dialog of your pipeline: + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-git-trigger.png" +url="/images/pipeline/triggers/add-git-trigger.png" +alt="Adding GIT Trigger" +max-width="50%" +%} + +Notice however that Codefresh capabilities are always based on what your Git provider is offering. If your Git provider does not support webhooks for specific events, then these will not be available in the trigger dialog. + +## Building branches automatically + +By default, Codefresh connects to your Git provider and does the following: + +1. Auto-builds every new commit that happens in master or any other branch +2. Auto-builds every new branch when it is created + +You can change the default behavior so that it matches your own workflow using extra [Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/). + +You don't have to do anything special to set up this communication between Codefresh and your Git provider. It was set up automatically when you connected your Codefresh account to your Git provider. + +Codefresh also creates a default Git trigger the first time you create a project. + +{% include +image.html +lightbox="true" +file="/images/pipeline/triggers/default-git-trigger.png" +url="/images/pipeline/triggers/default-git-trigger.png" +alt="Default GIT trigger" +caption="Default GIT trigger" +max-width="50%" +%} + +If you create a new branch in your repository, Codefresh automatically builds it and also stores the resulting Docker image. + +``` +git checkout -b another-branch +[..make changes...] +git commit -a -m "My changes" +git push -u origin another-branch +``` + +The build will clearly define its source branch: + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/auto-branch-build.png" +url="/images/guides/branches-pull-requests/auto-branch-build.png" +alt="Building automatically new branches" +caption="Building automatically new branches" +max-width="100%" +%} + +When you commit to a Pull Request (PR), Codefresh auto-builds the PR, and you can also see the build request in the GitHub UI as well: + +{% include +image.html +lightbox="true" +file="/images/getting-started/quick-start-test-pr/auto-build-pr.png" +url="/images/getting-started/quick-start-test-pr/auto-build-pr.png" +alt="Pull Request Status" +caption="Pull Request Status (click image to enlarge)" +max-width="50%" +%} + +## Building specific branches manually + +Sometimes you want to run an ad-hoc build on a specific branch without actually committing anything. You can do that in the [run dialog of a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines) by selecting a branch from the dropdown menu. + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/build-specific-branch.png" +url="/images/guides/branches-pull-requests/build-specific-branch.png" +alt="Building a specific branch" +caption="Building a specific branch" +max-width="50%" +%} + +From the same dialog, you can also select a specific trigger to "emulate" for this branch if you have connected multiple triggers on the same pipeline. + +## Restricting which branches to build + +The auto-build nature of Codefresh for all branches is what you want most times. For larger projects, you might wish to restrict pipelines running only on specific branches. 
+ +This is performed by defining [the branch field]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#pull-request-target-branch-and-branch) in the trigger dialog with a regular expression. + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/restrict-branch.png" +url="/images/guides/branches-pull-requests/restrict-branch.png" +alt="Restrict a pipeline to a single branch" +caption="Restrict a pipeline to a single branch" +max-width="50%" +%} + +The trigger above will only be activated for the `production` branch, so if a developer creates a new branch this pipeline will not run for it. Remember also that this field is actually a regular expression, so you can restrict a pipeline to a specific naming pattern (i.e. a group of branch names). + +Another popular filtering mechanism is to keep the auto-build nature of Codefresh, but enable/disable specific pipeline steps according to the branch being built. This is performed by using [step conditions]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/). +Here is an example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'multistage' + dockerfile: Dockerfile + deploy_production: + title: Deploying to production + type: deploy + stage: deploy + kind: kubernetes + cluster: 'my-prod-cluster' + namespace: default + service: my-prod-app + candidate: + image: '${{build_app_image}}' + registry: 'dockerhub' + when: + branch: + only: + - master + deploy_staging: + title: Deploying to staging + type: deploy + stage: deploy + kind: kubernetes + cluster: 'my-staging-cluster' + namespace: development + service: my-staging-app + candidate: + image: '${{build_app_image}}' + registry: 'dockerhub' + when: + branch: + only: + - /^JIRA-FEATURE-.*/i +{% endraw %} +{% endhighlight %} + +This pipeline will execute for **ALL** branches and pull requests, but: + +1. If the branch is `master` it will deploy the Docker image to the production cluster and namespace `default` +1. If the branch starts with `JIRA-FEATURE-` (e.g. JIRA-FEATURE-1234, JIRA-FEATURE-testing, JIRA-FEATURE-fixbbug), it will deploy to a staging cluster to namespace `development` +1. In all other cases of branches or pull requests, it will just build the Docker image without deploying it anywhere + +You can see that if a developer creates an unrelated branch (that doesn't match the expected name), no deployment will take place: + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/branch-step-condition.png" +url="/images/guides/branches-pull-requests/branch-step-condition.png" +alt="Restrict pipeline steps according to branch" +caption="Restrict pipeline steps according to branch" +max-width="80%" +%} + +This is a more granular way to control how your branch affects your pipeline. + +>We recommend you follow the first method of having multiple simple pipelines with different branch expressions in the trigger dialog, instead of having a single complex pipeline using step conditions. Remember that in Codefresh you can create as many pipelines as you want for a single project instead of being limited to one pipeline per project. 
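+
+As a purely illustrative example (the exact patterns depend on your own branch naming conventions), the branch field of two such simple pipelines could contain regular expressions like the following:
+
+```
+/^production$/        # run this pipeline only for the production branch
+/^JIRA-FEATURE-.*/i   # run this pipeline for feature branches following the naming pattern
+```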
+ +## Handling pull request events + +The power of Codefresh becomes evident when you realize that you can have extra pipelines that respond to specific PR events. For example, you can have a specific pipeline that runs **only** when a PR is opened for the first time or when a PR is closed. + +You can see all supported PR events in the trigger dialog. + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/choosing-pr-events.png" +url="/images/guides/branches-pull-requests/choosing-pr-events.png" +alt="Choosing PR events for a pipeline" +caption="Choosing PR events for a pipeline" +max-width="80%" +%} + +>Remember that the events shown are those supported by your Git provider. Not all Git providers support all possible pull request events. + +You can select multiple pull request events for a single pipeline, or have multiple pipelines that respond to individual pull request events. There is no right or wrong answer as it mostly depends on how your team handles pull requests. + +The most useful events are: + +* Pull request open +* Pull request sync (when a commit happens to a PR) +* Pull request closed +* Comment added on a pull request + +There is also the shortcut checkbox for *any PR event* if you don't care about which specific event happened. + +## Trunk Based Development + +One of the most popular Git workflows is [Trunk Based development](https://trunkbaseddevelopment.com/){:target="\_blank"} with short-lived feature branches. + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/trunk-based-development.png" +url="/images/guides/branches-pull-requests/trunk-based-development.png" +alt="Trunk Based Development" +caption="Trunk Based Development" +max-width="100%" +%} + +In this process, the master branch is always ready for production. The feature branches are created from the master and can have several commits before being merged back to master. + +This process can be easily created in Codefresh with two separate pipelines: + +* The "main" pipeline that deploys master to the production environment +* The feature pipeline that checks each feature as it is developed (and optionally deploys it to a staging environment) + +As an example, here is a minimal pipeline for the master branch: + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/production-pipeline.png" +url="/images/guides/branches-pull-requests/production-pipeline.png" +alt="Pipeline that deploys to production" +caption="Pipeline that deploys to production" +max-width="100%" +%} + +The pipeline: + +1. Checks out the source code +1. Builds a Docker image +1. Creates and stores a Helm chart +1. Deploys the chart to Kubernetes + +The pipeline for feature branches is different: + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/feature-pipeline.png" +url="/images/guides/branches-pull-requests/feature-pipeline.png" +alt="Pipeline for feature branches" +caption="Pipeline for feature branches" +max-width="100%" +%} + +For each feature branch: + +1. We check out the code +1. Run linters on the source code +1. Build the Docker image +1. Run some unit tests to verify the Docker image (possible with [service containers]({{site.baseurl}}/docs/pipelines/service-containers/)) + +To implement trunk-based development, we create two triggers for these pipelines. For the production pipeline, we just make sure that the trigger is only launched when commits land on master (and only there). 
+ +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/trigger-for-production-pipeline.png" +url="/images/guides/branches-pull-requests/trigger-for-production-pipeline.png" +alt="Trigger for production pipeline" +caption="Trigger for production pipeline" +max-width="50%" +%} + +For the feature branch pipeline, we check the events for: + +* PR (pull request) Open +* PR Sync (when a commit happens on the PR) + +For the [branch specifications]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#pull-request-target-branch-and-branch) we make sure that we look only for Pull Requests that are targeted **AT** `master`. + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/trigger-for-features.png" +url="/images/guides/branches-pull-requests/trigger-for-features.png" +alt="Trigger for pull request pipeline" +caption="Trigger for pull request pipeline" +max-width="50%" +%} + +With this configuration, the whole process is as follows: + +1. A developer creates a new branch from master. Nothing really happens at this point. +1. The developer opens a new PR for this branch. The feature pipeline runs (because of the PR open checkbox). +1. The developer makes one or more commits to the branch. The feature pipeline runs again for each commit (because of the PR sync checkbox). +1. The developer commits the branch back to master. The main pipeline runs and deploys to production. + +You can fine-tune this workflow according to your needs. For example, you might also specify a naming pattern on the branches for the PR (e.g. feature-xxx) to further restrict which branches are considered ready for production. + +> We didn't need to handle the PR close/merge events. As soon as a PR is merged back to master, the Git provider sends anyway an event that a commit has happened in master, which means that the main production pipeline will take care of releasing the contents of master. + +## Git-flow + +[Git Flow](https://nvie.com/posts/a-successful-git-branching-model/){:target="\_blank"} is another popular management process for Git branches. For brevity reasons, we will not list all the details for all branch types, but it should be obvious that you can recreate all aspects of Git flow with Codefresh triggers. + +For example, to run a pipeline only for pull requests from branches named `feature-XXX` that will be merged back to `develop` branch, you can create a trigger like this: + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/git-flow-feature-trigger.png" +url="/images/guides/branches-pull-requests/git-flow-feature-trigger.png" +alt="Git flow feature branch trigger" +caption="Git flow feature branch trigger" +max-width="50%" +%} + +To launch a pipeline that will only run when a commit happens on a release branch named `release-XXX`, you can create a trigger like this: + +{% include image.html +lightbox="true" +file="/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png" +url="/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png" +alt="Git flow release branch trigger" +caption="Git flow release branch trigger" +max-width="50%" +%} + +In a similar manner, you can create the triggers for all other branch types in Git flow. + +## Create your own workflow + +Trunk-based development and Git-flow are only some examples of what a Git workflow can look like. Your organization might follow a completely different process. 
Using the basic building blocks of Codefresh triggers (branch field, PR checkboxes, etc) you should be able to model your own workflow according to your own pipelines. + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[YAML examples]({{site.baseurl}}/docs/example-catalog/examples/) +[Preview environments]({{site.baseurl}}/docs/ci-cd-guides/preview-environments/) + + + + + diff --git a/_docs/ci-cd-guides/working-with-docker-registries.md b/_docs/ci-cd-guides/working-with-docker-registries.md new file mode 100644 index 000000000..28ab8c91b --- /dev/null +++ b/_docs/ci-cd-guides/working-with-docker-registries.md @@ -0,0 +1,573 @@ +--- +title: "Work with Docker Registries" +description: "Push, pull, and tag Docker images in Codefresh pipelines" +group: ci-cd-guides +redirect_from: + - /docs/build-specific-revision-image/ + - /docs/image-management/build-specific-revision-image/ + - /docs/docker-registries/working-with-docker-registries/ +toc: true +--- + +Codefresh contains first-class Docker registry support. This means that you don't need to manually write `docker login` and `docker pull/push` commands within pipelines. You can use declarative YAML, and all credentials are configured in a central location once. + +## Viewing Docker images + +To see all images from [all connected registries]({{site.baseurl}}/docs/integrations/docker-registry/docker-registries/): + +* In the Codefresh UI, from the Artifacts section in the sidebar, select [**Images**](https://g.codefresh.io/images/){:target="\_blank"}. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/docker-registry-list.png" + url="/images/guides/working-with-images/docker-registry-list.png" + alt="Codefresh Registry Image List" + caption="Codefresh Registry Image List" + max-width="70%" +%} + +Each image displays basic details such as the Git branch, commit message, hash that created it, creation date, as well as all tags. +* To view image metadata, click on the image. For details, see [Docker image metadata]({{site.baseurl}}/docs/pipelines/docker-image-metadata/). + + +**Filters for Docker images** +The top left of the Images page has several filters that allow you to search for a specific subset of Docker images. +Filters include: +* Tagged/untagged images +* Base image name +* Git branch +* Tag +* Pipeline volumes + +Multiple filters work in an `AND` manner. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/docker-registry-filters.png" + url="/images/guides/working-with-images/docker-registry-filters.png" + alt="Codefresh Registry Image filters" + caption="Codefresh Registry Image filters" + max-width="40%" +%} + + +**Actions for Docker images** +On the right are the actions available foreach Docker image. +You can: +* Launch a Docker image as a [test environment]({{site.baseurl}}/docs/getting-started/on-demand-environments/) +* Promote a Docker image (explained in the following sections) +* Pull the image locally on your workstation with different commands +* Re-run the pipeline that created the image + + +## Pulling Docker images + +Pulling Docker images in Codefresh is completely automatic. You only need to mention a Docker image by name, and Codefresh automatically pulls it for you and uses it in a pipeline. 
+ +### Pulling public images + +To pull a public image from Docker Hub or other public registries: + +* Specify the name of the image and tag that you want to use. + +For example: + +```yaml +CollectAllMyDeps: + title: Install dependencies + image: python:3.6.4-alpine3.6 + commands: + - pip install . +``` + +This [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) pulls the image `python:3.6.4-alpine3.6` from Docker Hub, and then runs the command `pip install .` inside it. +You can see the images that get pulled in the [Codefresh pipeline log]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/). + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/pull-public-image.png" + url="/images/guides/working-with-images/pull-public-image.png" + alt="Pulling a public image" + caption="Pulling a public image" + max-width="70%" +%} + +The image is also automatically cached in the [image cache]({{site.baseurl}}/docs/pipelines/pipeline-caching/#distributed-docker-image-caching). + +Codefresh also automatically pull for you any images mentioned in Dockerfiles (i.e. the `FROM` directive) as well as [service containers]({{site.baseurl}}/docs/pipelines/service-containers/). + + +### Pulling private images + +To pull a private image from one of your connected registries, in addition to specifying the image by name and tag, you must also prepend the appropriate prefix of the registry domain. The registry domain prefix is required for Codefresh to understand that it is a private image. + +For example, in the case of ACR (Azure Container Registry): + +``` +registry-name.azurecr.io/my-docker-repo/my-image-name:tag +``` + +Get the full name of a Docker image: +* In the Codefresh UI, from the Artifacts section in the sidebar, select [**Images**](https://g.codefresh.io/images/){:target="\_blank"}. +* Click on the image and copy the image name from the Activity column, **Image promoted** label. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/image-dashboard-tag.png" + url="/images/guides/working-with-images/image-dashboard-tag.png" + alt="Looking at tag of a private image" + caption="Looking at tag of a private image" + max-width="65%" +%} + +The exact format of the image name depends on the type of registry you use. Codefresh uses the domain prefix of each image to understand which integration to use, and then takes care of all `docker login` and `docker pull` commands on its own behind the scenes. 
+ +For example, if you have connected [Azure]({{site.baseurl}}/docs/integrations/docker-registries/azure-docker-registry/){:target="\_blank"}, [AWS]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/){:target="\_blank"}, and [Google]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/){:target="\_blank"} registries, you can pull three images for each in a pipeline like this: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_go_unit_tests: + title: Running Go Unit tests + image: 'us.gcr.io/project-k8s-sample-123454/my-golang-app:prod' + commands: + - go test -v + my_mvn_unit_tests: + title: Running Maven Unit tests + image: '123456789012.dkr.ecr.us-west-2.amazonaws.com/my-java-app:latest' + commands: + - mvn test + my_python_unit_tests: + title: Running Python Unit tests + image: 'my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:master' + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +Codefresh automatically logs in to each registry using the credentials you have defined centrally, and pulls all the images. The same thing will happen with Dockerfiles that mention any valid Docker image in their `FROM` directive. + + +### Pulling images created in the same pipeline + +Codefresh allows you to create a Docker image on demand and use it in the same pipeline that created it. In several scenarios (such as [unit tests]({{site.baseurl}}/docs/testing/unit-tests/)), it is very common to use a Docker image right after it is built. + +In that case, as a shortcut, Codefresh allows you to simply [specify the name]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables) of the respective [build step]({{site.baseurl}}/docs/pipelines/steps/build/). + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/python-flask-sample-app' + revision: 'master' + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-app-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + image: '${{MyAppDockerImage}}' + commands: + - python setup.py test + +{% endraw %} +{% endhighlight %} + +In the above pipeline, Codefresh: + +1. Checks out source code through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image, named `my-app-image:master`. Notice the lack of `docker push` commands. +1. In the next step, automatically uses that image and runs `python setup.py test` inside it. Again, notice the lack of `docker pull` commands. + +The important line here is the following: + +{% highlight yaml %} +{% raw %} + image: ${{MyAppDockerImage}} +{% endraw %} +{% endhighlight %} + +This says to Codefresh "in this step please use the Docker image that was built in the step named `MyAppDockerImage`". + +You can see the automatic pull inside the Codefresh logs. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/pull-private-image.png" + url="/images/guides/working-with-images/pull-private-image.png" + alt="Auto-Pulling a private image" + caption="Auto-Pulling a private image" + max-width="70%" +%} + +The image is still pushed to your default Docker registry. If you don't want this behavior, add the `disable_push` property in the build step. 
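+
+As a sketch, the build step from the previous example would then look like this (only the `disable_push` property is new):
+
+```yaml
+  MyAppDockerImage:
+    title: Building Docker Image
+    type: build
+    image_name: my-app-image
+    working_directory: ./
+    tag: 'master'
+    dockerfile: Dockerfile
+    disable_push: true
+```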
+ + +## Pushing Docker images + +Pushing to your default Docker registry is completely automatic. All successful [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) automatically push to the default Docker registry of your Codefresh account without any extra configuration. + +To push to another registry, you only need to know how this registry is [connected to Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/), and more specifically, what is the unique name of the integration. You can see the name from your [Docker Registry integrations](https://g.codefresh.io/account-admin/account-conf/integration/registryNew), or asking your Codefresh administrator. + + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/linked-docker-registries.png" + url="/images/guides/working-with-images/linked-docker-registries.png" + alt="Name of linked Docker Registries" + caption="Name of linked Docker Registries" + max-width="50%" +%} + +Once you know the registry identifier, you can use it in any [push step]({{site.baseurl}}/docs/pipelines/steps/push/) or [build step]({{site.baseurl}}/docs/pipelines/steps/build/) by specifying the registry with that name: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} + build_image: + title: Building my app image + type: build + image_name: my-app-image + dockerfile: Dockerfile + tag: 'master' + push_to_registry: + title: Pushing to Docker Registry + type: push + #Name of the build step that is building the image + candidate: '${{build_image}}' + tag: '1.2.3' + # Unique registry name + registry: azure-demo +{% endraw %} +{% endhighlight %} + +Notice that + * the `candidate` field of the push step mentions the name of the build step (`build_image`) that will be used for the image to be pushed. + * The registry is only identified by name (`azure-demo` in the example). The domain and credentials are not part of the pipeline as they are already known to Codefresh through the Docker registry integration. + + You can also override the name of the image with any custom name. This way the push step can choose any image name regardless of what was used in the build step. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} + build_image: + title: Building my app image + type: build + image_name: my-app-image + dockerfile: Dockerfile + tag: 'master' + push_to_registry: + title: Pushing to Docker Registry + type: push + #Name of the build step that is building the image + candidate: '${{build_image}}' + tag: '1.2.3' + # Unique registry name + registry: azure-demo + image_name: my-company/web-app +{% endraw %} +{% endhighlight %} + +Here the build step creates an image named `my-app-image:master`, but the push step actually pushes it as `my-company/web-app:1.2.3`. + +For more examples, such as using multiple tags, or pushing in parallel, see the [push examples]({{site.baseurl}}/docs/pipelines/steps/push/#examples) + +### Pushing images with an optional prefix + +There are some registry providers that require a specific prefix for all your Docker images. This is often the name of an organization, account, or other top-level construct defined by the registry. 
+ + `codefresh.yml` +{% highlight yaml %} +{% raw %} + build: + title: "Building Docker image" + type: "build" + image_name: "acme-company/trivial-go-web" + working_directory: "${{clone}}" + tag: "latest" + dockerfile: "Dockerfile.multistage" + stage: "build" + registry: azure +{% endraw %} +{% endhighlight %} + +The example above will push the final Docker image as `kostisazureregistry.azurecr.io/acme-company/trivial-go-web:latest`. + +However, you can also set up the prefix globally once in the [Docker registry integrations]({{site.baseurl}}/docs/integrations/docker-registries/). This way you can simplify your pipelines and have them mention only the final image name. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/registry-prefix.png" + url="/images/guides/working-with-images/registry-prefix.png" + alt="Global registry prefix" + caption="Global registry prefix" + max-width="70%" +%} + +Using the repository prefix in the example above, automatically prefixes all your Docker images with `acme-company`. + +Now you can simplify your build/push step as below: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} + build: + title: "Building Docker image" + type: "build" + image_name: "trivial-go-web" + working_directory: "${{clone}}" + tag: "latest" + dockerfile: "Dockerfile.multistage" + stage: "build" + registry: azure +{% endraw %} +{% endhighlight %} + +The final Docker image will still be `kostisazureregistry.azurecr.io/acme-company/trivial-go-web:latest`. + +## Working with multiple registries with the same domain + +With Codefresh, you can [connect multiple registries on a global level]({{site.baseurl}}/docs/integrations/docker-registries/). This allows you to create pipelines that push/pull images to different registries without having to deal with Docker credentials within the pipeline itself. + +However, there are several times where you have multiple registries that have the same domain. For example, you might have two Docker Hub accounts connected to Codefresh (so both of them can resolve images for the `docker.io` domain). + +This means that when you reference an image by domain name, as in a freestyle step for example, Codefresh might not know which Docker registry account to use for the pull action. + +> This is not a Codefresh limitation, but a Docker one. Even with vanilla Docker you cannot log in to multiple registries at the same time if they share the same domain. + +To solve this problem, Codefresh automatically detects connected registries that have the same domain and allow you to designate the primary one. The primary registry is used for image resolution when pulling Docker images. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/primary-dockerhub.png" + url="/images/guides/working-with-images/primary-dockerhub.png" + alt="Choosing a Docker registry as the primary one if they have the same domain" + caption="Choosing a Docker registry as the primary one if they have the same domain" + max-width="90%" +%} + +In the example above, even though two Docker Hub integrations are connected to Codefresh, only the primary one is used to pull images from the `docker.io` domain. You can still use the second one in push/build steps using the `registry` property. + +You can override the default behavior in each pipeline, by adding the optional `registry_context` property to instruct Codefresh on how to use a specific registry for pulling Docker images (if you have more than one for the same domain). 
+ + + +You can use the `registry_context` property in [build]({{site.baseurl}}/docs/pipelines/steps/build/), [push]({{site.baseurl}}/docs/pipelines/steps/push/), [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/), and [composition]({{site.baseurl}}/docs/pipelines/steps/composition/) steps. + +The `registry_context` property takes as value the name of an external connected registry. Build and composition steps accept an array of values as `registry_contexts`. In all cases, by using this optional property you instruct Codefresh to use a specific registry for pulling images. + +> The optional `registry_context` and `registry_contexts` properties only affect the **pulling** of Docker images. The registry used for *pushing* images is still declared explicitly in build and push pipeline steps. + +The syntax for the freestyle step is the following: + +{% highlight yaml %} +{% raw %} + test: + title: "Running test" + type: "freestyle" + image: "gcr.io/my-google-project/my-image:latest" + registry_context: my-second-gcr-registry # define what registry will be used for pulling the image + working_directory: "${{clone}}" + commands: + - "ls" +{% endraw %} +{% endhighlight %} + +The syntax for the build step is the following: + +{% highlight yaml %} +{% raw %} + build: + title: "Building Docker image" + type: "build" + image_name: "trivial-go-web" + working_directory: "${{clone}}" + tag: "latest" + dockerfile: "Dockerfile.multistage" + stage: "build" + registry_contexts: # define what registries will be used for pulling images + - second-dockerhub + - production-azure + registry: azure +{% endraw %} +{% endhighlight %} + + +The syntax for the push step is the following: + +{% highlight yaml %} +{% raw %} + push: + title: "Pushing 1st Docker image" + type: push + image_name: "kostiscodefresh/trivial-go-web" + tag: "latest" + stage: "push" + registry: dockerhub # Docker registry to push to + registry_context: second-dockerhub # Docker registry to pull images from + candidate: ${{build}} +{% endraw %} +{% endhighlight %} + +The syntax for the composition step is the following: + +{% highlight yaml %} +{% raw %} + my-composition: + title: Running Composition + type: composition + registry_contexts: + - first-gcr + - second-gcr + arguments: + composition: + version: '2' + services: + db: + image: postgres + composition_candidates: + test_service: + image: 'alpine:3.9' + command: printenv + working_dir: /app + environment: + - key=value +{% endraw %} +{% endhighlight %} + +Let's look at an example. We assume that we have two GCR integrations: + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/two-gcr-integrations.png" + url="/images/guides/working-with-images/two-gcr-integrations.png" + alt="Two GCR integrations" + caption="Two GCR integrations" + max-width="90%" +%} + +The first integration is the "production" one, and the second integration is the "staging" one. The production one is designated as primary. This means that by default even though both integrations work for the `gcr.io` domain, only the primary one is used in the Codefresh pipeline for pulling images. + +Let's say however that you want to build a Docker image that has a `FROM` statement from an image that exists in the staging registry. The image should be pushed to the production registry. 
You can use the `registry_context` property as shown below: + + +{% highlight yaml %} +{% raw %} + build: + title: "Building Docker image" + type: "build" + image_name: "gcr.io/production-project/my-image" + working_directory: "${{clone}}" + tag: "latest" + dockerfile: "Dockerfile" + stage: "build" + registry: production-gcr + registry_contexts: # define what registries will be used for pulling images + - staging-gcr +{% endraw %} +{% endhighlight %} + +Behind the scenes Codefresh will: + +1. First log in to the "staging" Docker registry using the "staging" credentials. +1. Build the Docker image, by resolving the `FROM` statements with "staging" images, pulling them as needed using the staging credentials. +1. Tag the Docker image. +1. Log in to the "production" Docker registry. +1. Push the final Docker image to the "production" registry. + +If your primary Docker registry is also the one that is used in your pipelines, you don't need the `registry_context` property at all, as this is the default behavior. When searching for an image to pull Codefresh will look at all your Docker registries (if they manage only a single domain), plus your "primary" Docker registries in case you have multiple Docker registries for the same domain. + +## Promoting Docker images + +Apart from building and pushing a brand new Docker image, you can also "promote" a Docker image by copying it from one registry to another. +You can perform this action either from the Codefresh UI or automatically from pipelines. + + +### Promoting images via the Codefresh UI + +You have the capability to "promote" any image of your choosing and push it to an external registry you have integrated into Codefresh (such as Azure, Google, Bintray etc.). + + +1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Images**](https://g.codefresh.io/images/){:target="\_blank"}. +1. To promote an image, in the row with the image, click the **Promote Image** icon on the right. + +{% + include image.html + lightbox="true" + file="/images/guides/working-with-images/docker-image-promotion.png" + url="/images/guides/working-with-images/docker-image-promotion.png" + alt="Promoting a Docker image" + caption="Promoting a Docker image" + max-width="50%" +%} + +1. From the list of connected registries, select the target registry, and define the tag that you want to push. +1. To "copy" this image from the existing registry to the target registry, click **Promote**. + +### Promoting images in pipelines + +You can also copy images from one registry to the other within a pipeline. +This is accomplished by specifying an existing image in the `candidate` field of the push step. + +For example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} + promote_to_production_registry: + title: Promoting to Azure registry + type: push + candidate: us.gcr.io/project-k8s-sample-123454/my-golang-app + tag: '1.2.3' + # Unique registry name + registry: azure-demo +{% endraw %} +{% endhighlight %} + +In the example above, we promote an image from [GCR]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/) to [ACR]({{site.baseurl}}/docs/integrations/docker-registries/azure-docker-registry/), which is already set up as `azure-demo`. + +You can even "promote" Docker images within the same registry by simply creating new tags. 
+For example:
+
+  `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+  promote_to_production:
+    title: Marking image with prod tag
+    type: push
+    candidate: my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:1.2.3
+    tag: 'production'
+    # Unique registry name
+    registry: azure-demo
+{% endraw %}
+{% endhighlight %}
+
+In the example above, the image `my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:1.2.3` is re-tagged as `my-azure-registry.azurecr.io/kostis-codefresh/my-python-app:production`. A very common pattern is to mark images with a special tag such as `production` **after** the image has been deployed successfully.
+
+
+## Related articles
+[Push pipeline step]({{site.baseurl}}/docs/pipelines/steps/push/)
+[External Docker registries]({{site.baseurl}}/docs/integrations/docker-registries/)
+[Accessing a Docker registry from your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/access-docker-registry-from-kubernetes/)
+[Build and push an image example]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/)
+
diff --git a/_docs/clients/csdp-cli.md b/_docs/clients/csdp-cli.md
deleted file mode 100644
index 2882c3672..000000000
--- a/_docs/clients/csdp-cli.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: "Download CLI"
-description: ""
-group: clients
-toc: true
----
-
-You need the Codefresh CLI to install Codefresh runtimes.
-* For the initial download, you also need to generate the API key and create the API authentication context, all from the UI.
-* Subsequent downloads for upgrade purposes require you to only run the download command, using existing API credentials.
-
-### Download Codefresh CLI
-Downloading the Codefresh CLI requires you to select the download mode and OS, generate an API key, and authentication context.
-1. Do one of the following:
-  * For first-time installation, go to the Welcome page, select **+ Install Runtime**.
-  * If you have provisioned a hybrid/hosted runtime, in the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtime**.
-1. Download the Codefresh CLI:
-  * Select one of the methods.
-  * Generate the API key and create the authentication context.
-  {% include
-   image.html
-   lightbox="true"
-   file="/images/getting-started/quick-start/quick-start-download-cli.png"
-   url="/images/getting-started/quick-start/quick-start-download-cli.png"
-   alt="Download CLI to install runtime"
-   caption="Download CLI to install runtime"
-   max-width="30%"
-   %}
-
-### Upgrade Codefresh CLI
-Upgrade the CLI to the latest version to prevent installation errors.
-1. Check the version of the CLI you have installed:
-  `cf version`
-1. Compare with the [latest version](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} released by Codefresh.
-1. 
Select and run the appropriate command: - -{: .table .table-bordered .table-hover} -| Download mode | OS | Commands | -| -------------- | ----------| ----------| -| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| -| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | -| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | -| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| -| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`| - -### Related articles -[Set up hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation) diff --git a/_docs/clients/upgrade-gitops-cli.md b/_docs/clients/upgrade-gitops-cli.md new file mode 100644 index 000000000..4a5e1a1e5 --- /dev/null +++ b/_docs/clients/upgrade-gitops-cli.md @@ -0,0 +1,88 @@ +--- +title: "Download/upgrade GitOps CLI" +description: "Have the latest version of the GitOps CLI" +group: installation +sub_group: gitops +toc: true +--- + +You need the Codefresh CLI to install Hybrid GitOps Runtimes, and have access to all the newest features. +For the initial download, you need to generate an API key and create the API authentication context, which you do from the UI. +When newer versions are available, the CLI automatically notifies you through a banner. You can use the existing API credentials for the upgrade. + + +## GitOps CLI installation modes +The table lists the modes available to install the GitOps CLI. + +{: .table .table-bordered .table-hover} +| Install mode | OS | Commands | +| -------------- | ----------| ----------| +| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| +| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | +| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | +| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| +| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`|```` + +## Install the GitOps CLI +Install the GitOps CLI using the option that best suits you: `curl`, `brew`, or standard download. +If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation. + +1. Do one of the following: + * For first-time installation, go to the Welcome page, select **+ Install Runtime**. 
+  * If you have provisioned a GitOps Runtime, in the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtime**.
+1. Install the Codefresh CLI:
+  * Select one of the installation modes.
+  * Generate the API key.
+  * Create the authentication context:
+    `cf config create-context codefresh --api-key <API_KEY>`
+
+
+  {% include
+   image.html
+   lightbox="true"
+   file="/images/getting-started/quick-start/quick-start-download-cli.png"
+   url="/images/getting-started/quick-start/quick-start-download-cli.png"
+   alt="Download CLI to install runtime"
+   caption="Download CLI to install runtime"
+   max-width="30%"
+   %}
+
+
+{::nomarkdown}
+

+{:/}
+
+
+## Upgrade the GitOps CLI
+
+The Codefresh CLI automatically self-checks its version, and if a newer version is available, prints a banner with the notification.
+
+  {% include
+   image.html
+   lightbox="true"
+   file="/images/runtime/cli-upgrade-banner.png"
+   url="/images/runtime/cli-upgrade-banner.png"
+   alt="Upgrade banner for Codefresh CLI"
+   caption="Upgrade banner for Codefresh CLI"
+   max-width="40%"
+   %}
+
+
+You can upgrade to a specific version if required, or download the latest version to an output folder and upgrade at your convenience.
+
+
+* Do any of the following:
+  * To upgrade to the latest version, run:
+    `cf upgrade`
+  * To upgrade to a specific version, even an older version, run:
+    `cf upgrade --version v<version>`
+    where:
+    `<version>` is the version you want to upgrade to.
+  * To download the latest version to an output file, run:
+    `cf upgrade --version v<version> -o <output-file>`
+    where:
+    * `<output-file>` is the path to the destination file, for example, `/cli-download`.
+
+## Related articles
+[Hosted GitOps Runtime setup]({{site.baseurl}}/docs/installation/gitops/hosted-runtime)
+[Hybrid GitOps Runtime installation]({{site.baseurl}}/docs/installation/gitops/hybrid-gitops)
diff --git a/_docs/dashboards/dora-metrics.md b/_docs/dashboards/dora-metrics.md
new file mode 100644
index 000000000..6230c340d
--- /dev/null
+++ b/_docs/dashboards/dora-metrics.md
@@ -0,0 +1,92 @@
+---
+title: "DORA metrics"
+description: "Get insights into your deployments"
+group: dashboards
+toc: true
+---
+
+DevOps is a collaboration paradigm that is sometimes mistaken for being too abstract or too generic. In an effort to quantify the benefits of adopting DevOps, [DORA research](https://www.devops-research.com/research.html#capabilities){:target="\_blank"} (acquired by Google in 2018) has introduced four key metrics that define specific goals for improving the software lifecycle in companies interested in adopting DevOps.
+
+DORA measures these metrics:
+
+* Deployment Frequency: How often an organization successfully releases to production
+* Lead Time for Changes: The length of time for a commit to be deployed into production
+* Change Failure Rate: The percentage of deployments causing a failure in production
+* Time to Restore Service: The length of time for an organization to recover from a failure in production
+
+[Read more on DORA](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance){:target="\_blank"}.
+
+## DORA metrics in Codefresh
+
+Monitoring DORA metrics can help identify delivery issues in your organization by detecting bottlenecks among teams, and can help you optimize your workflows at technical or organizational levels.
+Codefresh offers support for DORA metrics out of the box.
+
+* In the Codefresh UI, go to [DORA metrics](https://g.codefresh.io/2.0/dora-dashboard/dora){:target="\_blank"}.
+
+{% include
+image.html
+lightbox="true"
+file="/images/reporting/dora-metrics.png"
+url="/images/reporting/dora-metrics.png"
+alt="DORA metrics report"
+caption="DORA metrics report"
+max-width="100%"
+%}
+
+## Filters
+
+Use filters to define the exact subset of applications you are interested in. All filters support auto-complete and multiselect.
+More than one option within the same filter type has an OR relationship. Multiple filter types, when defined, share an AND relationship.
+
+* Runtimes: Show metrics for applications from selected runtimes
+* Clusters: Show metrics for applications deployed to selected clusters
+* Applications: Show metrics for selected applications
+* Time: Show metrics for applications for a specific time period
+
+> When no filters are defined, all metrics are shown for the last 90 days.
+
+## Metrics for favorite applications
+If you have [starred applications as favorites]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/#applications-dashboard-information) in the Applications dashboard, clicking {::nomarkdown}{:/} in DORA metrics displays DORA metrics only for those applications.
+
+
+## Metric totals
+As the title indicates, the Totals bar shows the total numbers, based on the filters defined, or for the last 90 days if there are no filters:
+
+* Deployments
+* Rollbacks
+* Commits/Pull Requests
+* Failure Rate: The number of failed deployments divided by the total number of deployments
+
+## Metric graphs
+The metric graphs are key to performance insights with DORA metrics. The metrics are again based on the filters defined, or for the last 90 days if there are no filters.
+
+In addition, you can select the granularity for each graph:
+
+* Daily
+* Weekly
+* Monthly
+
+>Tip:
+  Remember that the graphs for the DORA metrics reflect metrics of application deployments, not workflows.
+
+**Deployment Frequency**
+  The frequency at which applications are deployed to production, including both successful (Healthy) and failed (Degraded) deployments. A deployment is counted as an Argo CD sync in which a change in the application source code resulted in a new deployment of the application to production.
+  The X-axis charts the time based on the granularity selected, and the Y-axis charts the number of deployments. The number shown on the top right is the average deployment frequency based on granularity.
+
+**Lead Time for Changes**
+  The average number of days from the first commit for a PR (pull request) until the deployment date for the same PR. The key term here is _deployment_. Lead Time for Changes considers only those changes to workflows that result in a deployment. Making a change to a repo that does not result in a deployment is not included when calculating Lead Time for Changes.
+  The X-axis charts the time based on the granularity selected, and the Y-axis charts the time in minutes until the deployment. The number shown on the top right is the average number of days for a commit to reach production.
+
+**Change Failure Rate**
+  The failure or rollback rate, in percentage, for applications whose health status changed to Degraded on deployment. The key term here is _on deployment_. For example, bumping an image tag to one that does not exist results in the application being Degraded on deployment, and it is designated as failed.
+  The Change Failure Rate is derived by dividing the number of Degraded (failed/rollback) deployments by the total number of deployments. For example, two Degraded deployments out of twenty total deployments give a Change Failure Rate of 10%.
+  The X-axis charts the time based on the granularity selected, and the Y-axis charts the failure rate. The number shown on the top right is the average failure rate based on granularity, and therefore may not be equal to the Total Failure Rate.
+
+**Time to Restore Service**
+  The average number of hours taken for the status of Degraded deployments to return to Healthy. Again, similar to the Change Failure Rate, Time to Restore Service includes only deployments that became Degraded.
It is derived by dividing the total number of hours for all Degraded deployments to return to Healthy by the total number of Degraded deployments. + The X-axis charts the time based on the granularity, and the Y-axis charts the time in hours. The number shown on the top right is the average number of hours between the previous deployment and rollback for the same application. + +## Related articles +[Global analytics dashboard]({{site.baseurl}}/docs/dashboards/home-dashboard) +[Monitoring applications]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/) + diff --git a/_docs/reporting/home-dashboard.md b/_docs/dashboards/home-dashboard.md similarity index 96% rename from _docs/reporting/home-dashboard.md rename to _docs/dashboards/home-dashboard.md index c3a402c05..be0e4e505 100644 --- a/_docs/reporting/home-dashboard.md +++ b/_docs/dashboards/home-dashboard.md @@ -1,7 +1,7 @@ --- -title: "Home dashboard" +title: "Global analytics dashboard" description: "" -group: reporting +group: dashboards toc: true --- @@ -135,8 +135,8 @@ Analytics are derived by comparing the selected date range to the corresponding |**Longest Delivery Pipelines** | Up to ten pipelines with the longest duration. The same KPIs are displayed, and compared to those in the reference period. | ### Related articles -[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) -[Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/) -[Images in Codefresh]({{site.baseurl}}/docs/deployment/images/) +[DORA metrics]({{site.baseurl}}/docs/dashboards/dora-metrics/) +[Monitoring applications]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/) +[Images in Codefresh]({{site.baseurl}}/docs/deployments/gitops/images/) diff --git a/_docs/deployment/applications-dashboard.md b/_docs/deployments/gitops/applications-dashboard.md similarity index 89% rename from _docs/deployment/applications-dashboard.md rename to _docs/deployments/gitops/applications-dashboard.md index 1e9609760..51907f811 100644 --- a/_docs/deployment/applications-dashboard.md +++ b/_docs/deployments/gitops/applications-dashboard.md @@ -1,7 +1,8 @@ --- -title: "Monitoring applications" +title: "Monitoring GitOps applications" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -27,15 +28,15 @@ Monitor the current [health and sync status of applications](#identify-applicati * [Monitor deployments for selected application](#monitor-deployments-for-selected-application) * [Monitor services for selected application](#monitor-services-for-selected-application) ->For information on creating and managing applications and application resources, see [Creating applications]({{site.baseurl}}/docs/deployment/create-application/) and [Managing applications]({{site.baseurl}}/docs/deployment/manage-application/). +>For information on creating and managing applications and application resources, see [Creating applications]({{site.baseurl}}/docs/deployments/gitops/create-application/) and [Managing applications]({{site.baseurl}}/docs/deployments/gitops/manage-application/). -### Select view mode for the Applications dashboard +## Select view mode for the Applications dashboard View deployed applications in either List (the default) or Card views. Both views are sorted by the most recent deployments. 1. In the Codefresh UI, go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard/list){:target="\_blank"}. 1. Select **List** or **Cards**. 
-#### Applications List view +### Applications List view Here is an example of the Applications dashboard in List view mode. @@ -49,7 +50,7 @@ caption="Applications Dashboard: List view" max-width="60%" %} -#### Applications Card view +### Applications Card view Here is an example of the Applications dashboard in Card view mode. The Card view provides a scannable view of application data and the actions to manage applications. {% include @@ -62,18 +63,18 @@ caption="Applications Dashboard: Card view" max-width="60%" %} -### Applications dashboard information +## Applications dashboard information Here's a description of the information and actions in the Applications dashboard. {: .table .table-bordered .table-hover} | Item | Description | | -------------- | -------------- | |Application filters | Filter by a range of attributes to customize the information in the dashboard to bring you what you need. {::nomarkdown}
  • Application state
    A snapshot that displays a breakdown of the deployed applications by their health status.
    Click a status to filter by applications that match it.
    Codefresh tracks Argo CD's set of health statuses. See the official documentation on Health sets. .
  • Application attributes
    Attribute filters support multi-selection, and results are based on an OR relationship within the same filter with multiple options, and an AND relationship between filters.
    Clicking More Filters gives you options to filter by Health status, Cluster names, Namespace, and Type.
    • Application Type: Can be any of the following
      • Applications: Standalone applications. See the official documentation on Applications.
      • ApplicationSet: Applications created using the ApplicationSet Custom Resource (CR) template. An ApplicationSet can generate single or multiple applications. See the official documentation on Generating Applications with ApplicationSet.
      • Git Source: Applications created by Codefresh that includes other applications and CI resources. See Git Sources.
    • Labels:The K8s labels defined for the applications. The list displays labels of all the applications, even if you have applied filters.
      To see the available labels, select Add, and then select the required label and one or more values.
      To filter by the labels, select Add and then Apply.
      See the official documentation on Labels and selectors.
{:/}| -|{::nomarkdown}{:/}| Star applications as favorites and view only the starred applications.{::nomarkdown}
Select the to star the application as a favorite.

To filter by favorite applications, on the filters bar, select .
{:/} TIP: If you star applications as favorites in the Applications dashboard, you can filter by the same applications in the [DORA metrics dashboard]({{site.baseurl}}/docs/reporting/dora-metrics/#metrics-for-favorite-applications). | -|Application actions| Options to monitor/manage applications through the application's context menu. {::nomarkdown}
  • Quick view
    A comprehensive read-only view of the deployment and definition information for the application.
  • {:/}See [Application Quick View](#view-deployment-and-configuration-info-for-selected-application) in this article.{::nomarkdown}
  • Synchronize/Sync
    Manually synchronize the application.
  • {:/}See [Manually sync applications]({{site.baseurl}}/docs/deployment/manage-application/#manually-synchronize-an-application).{::nomarkdown}
  • Edit
    Modify application definitions.
  • {:/}See [Edit application definitions]({{site.baseurl}}/docs/deployment/manage-application/#edit-application-definitions).{::nomarkdown}
  • Refresh and Hard Refresh: Available in Card view only. In List view, you must first select the application.
    • Refresh: Retrieve desired (Git) state, compare with the live (cluster) state, and refresh the application to sync with the desired state.
    • Hard Refresh: Refresh the application to sync with the Git state, while removing the cache.
    {:/} | +|{::nomarkdown}{:/}| Star applications as favorites and view only the starred applications.{::nomarkdown}
    Select the to star the application as a favorite.

    To filter by favorite applications, on the filters bar, select .
    {:/} TIP: If you star applications as favorites in the Applications dashboard, you can filter by the same applications in the [DORA metrics dashboard]({{site.baseurl}}/docs/reporting/dora-metrics/#metrics-for-favorite-applications). | +|Application actions| Options to monitor/manage applications through the application's context menu. {::nomarkdown}
    • Quick view
      A comprehensive read-only view of the deployment and definition information for the application.
    • {:/}See [Application Quick View](#view-deployment-and-configuration-info-for-selected-application) in this article.{::nomarkdown}
    • Synchronize/Sync
      Manually synchronize the application.
    • {:/}See [Manually sync applications]({{site.baseurl}}/docs/deployments/gitops/manage-application/#manually-synchronize-an-application).{::nomarkdown}
    • Edit
      Modify application definitions.
    • {:/}See [Edit application definitions]({{site.baseurl}}/docs/deployments/gitops/manage-application/#edit-application-definitions).{::nomarkdown}
    • Refresh and Hard Refresh: Available in Card view only. In List view, you must first select the application.
      • Refresh: Retrieve desired (Git) state, compare with the live (cluster) state, and refresh the application to sync with the desired state.
      • Hard Refresh: Refresh the application to sync with the Git state, while removing the cache.
      {:/} | -### Identify applications with warnings/errors +## Identify applications with warnings/errors Errors are flagged in the **Warnings/Errors** button, displayed at the top right of the Applications dashboard. Clicking the button shows the list of applications with the warnings/errors and the possible reasons for these. {% include @@ -97,7 +98,7 @@ All errors are Argo CD-generated errors. Codefresh generates custom warnings for
      {:/} -#### Warning: Missing Rollouts reporter in cluster +### Warning: Missing Rollouts reporter in cluster **Reason**: Codefresh has detected that Argo Rollouts is not installed on the target cluster. Rollout instructions are therefore not executed and the application is not deployed. Applications with `rollout` resources need Argo Rollouts on the target cluster, both to visualize rollouts in the Applications dashboard and control rollout steps with the Rollout Player. @@ -108,7 +109,7 @@ Applications with `rollout` resources need Argo Rollouts on the target cluster,
      {:/} -#### Warning: Long sync +### Warning: Long sync **Reason**: Ongoing sync for application exceeds 30 minutes (Argo CD's default duration for a sync operation). **Corrective Action**: @@ -119,7 +120,7 @@ Applications with `rollout` resources need Argo Rollouts on the target cluster, * Drill down into the application to investigate the issue and make changes. -### View deployment and configuration info for selected application +## View deployment and configuration info for selected application View deployment, definition, and event information for the selected application in a centralized location through the Quick View. A read-only view, the Quick View displays information on the application state and location, labels and annotations, parameters, sync options, manifest, status and sync events. @@ -165,7 +166,8 @@ max-width="50%" -##### Quick View: Summary +### Quick View: Summary + Displays health, sync status, and source and destination definitions. {% include @@ -178,7 +180,9 @@ caption="Application Quick View: Summary" max-width="30%" %} -##### Quick View: Metadata + +### Quick View: Metadata + Displays labels and annotations for the application. {% include @@ -191,7 +195,9 @@ caption="Application Quick View: Metadata" max-width="30%" %} -##### Quick View: Parameters + +### Quick View: Parameters + Displays parameters configured for the application, based on the tool used to create the application's manifests. The parameters displayed differ according to the tool: `directory` (as in the screenshot below), `Helm` charts, or `Kustomize` manifests, or the specific plugin. @@ -205,7 +211,8 @@ caption="Application Quick View: Parameters" max-width="30%" %} -##### Quick View: Sync Options +### Quick View: Sync Options + Displays sync options enabled for the application. {% include @@ -218,7 +225,8 @@ caption="Application Quick View: Parameters" max-width="30%" %} -##### Quick View: Manifest +### Quick View: Manifest + Displays the YAML version of the application manifest. {% include @@ -231,7 +239,8 @@ caption="Application Quick View: Manifest" max-width="30%" %} -##### Quick View: Events +### Quick View: Events + Displays status and sync events for the application. {% include @@ -244,7 +253,7 @@ caption="Application Quick View: Events" max-width="30%" %} -### Monitor health and sync statuses for selected application +## Monitor health and sync statuses for selected application Monitor the health status of the selected application, the current sync status, and the result of the previous sync operation. Once you select an application, the quickest option to monitor statuses is through the application header which is always displayed, no matter what tab you navigate to. @@ -274,7 +283,7 @@ max-width="40%" You can also view the current health and sync status for the application as a resource in the Current State tab. -### Monitor resources for selected application +## Monitor resources for selected application Monitor the resources deployed in the current version of the selected application in the Current State tab. Selecting an application from the Applications dashboard takes you to the Current State tab, which as its title indicates, displays the @@ -302,7 +311,7 @@ You can view application resources in [List or Tree views](#view-modes-for-appli > To quickly see which resources have been added, modified, or removed for the current or for a specific deployment, switch to the Timeline tab and expand the deployment record to show Updated Resources. 
See [Monitor resource updates for deployments](#monitor-resource-updates-for-deployments). -#### View modes for application resources +### View modes for application resources The Current State tab supports Tree and List view formats. * Tree view (default): A hierarchical, interactive visualization of the application and its resources. Useful for complex deployments with multiple clusters and large numbers of resources. See also [Working with resources in Tree view](#working-with-resources-in-tree-view). @@ -335,7 +344,7 @@ max-width="50%" -##### Working with resources in Tree view +#### Working with resources in Tree view The Tree view is designed to impart key information at a glance. Review the sections that follow for pointers to quickly get to what you need in the Tree view. **Context menu** @@ -415,7 +424,7 @@ max-width="50%" %} -#### Filters for application resources +### Filters for application resources Filters are common to both Tree and List views, and when applied are retained when switching between views. `IgnoreExtraneous` is a filter in [Argo CD](https://argo-cd.readthedocs.io/en/stable/user-guide/compare-options){:target="\_blank"} that allows you to hide specific resources from the Current State views. These resources are usually generated by a tool and their sync statuses have no impact on the sync status of the application. For example, `ConfigMap` and `pods`. The application remains in-sync even when such resources are syncing or out-of-sync. @@ -450,7 +459,7 @@ max-width="50%" %} -#### Health status for application resources +### Health status for application resources View and monitor health status of the selected application's resources in the Current State tab, in Tree or List views. Identify the health of an application resource through the color-coded border and the resource-type icon (Tree view), or the textual labels at the right of the resource (List view). @@ -458,18 +467,20 @@ Identify the health of an application resource through the color-coded border an {: .table .table-bordered .table-hover} | Health status | Description | Display in Tree view | | -------------- | ------------| ------------------| -| **Healthy** | Resource is functioning as required. | {::nomarkdown}{:/} | -| **Progressing** | Resource is not healthy but can become healthy before the timeout occurs.| {::nomarkdown}{:/} | -| **Suspended** | Resource is not functioning, and is either suspended or paused. For example, Cron job or a canary rollout.| {::nomarkdown}{:/} | + +| **Healthy** | Resource is functioning as required. | {::nomarkdown}{:/} | +| **Progressing** | Resource is not healthy but can become healthy before the timeout occurs.| {::nomarkdown}{:/} | +| **Suspended** | Resource is not functioning, and is either suspended or paused. For example, Cron job or a canary rollout.| {::nomarkdown}{:/} | | **Missing** | Resource is not present on the cluster. |{::nomarkdown}{:/} | -| **Degraded** | Resource is not healthy, or a timeout occurred before it could reach a healthy status.| {::nomarkdown}{:/} | -| **Unknown** | Resource does not have a health status, or the health status is not tracked in Argo CD. For example,`ConfigMaps` resource types. | {::nomarkdown}{:/} | +| **Degraded** | Resource is not healthy, or a timeout occurred before it could reach a healthy status.| {::nomarkdown}{:/} | +| **Unknown** | Resource does not have a health status, or the health status is not tracked in Argo CD. For example,`ConfigMaps` resource types. 
| {::nomarkdown}{:/} | + See also [Argo CD's set of health checks](https://argo-cd.readthedocs.io/en/stable/operator-manual/health/){:target="\_blank"}. -#### Sync status for application resources +### Sync status for application resources Similar to the health status, the Current State also tracks the sync status of all application resources. The sync status identifies if the live state of the application resource on the cluster is synced with its desired state in Git. Identify the sync status through the icon on the left of the resource name and the color of the resource name (Tree view), or the textual labels at the right of the resource (List view). @@ -479,15 +490,17 @@ The table describes the possible sync statuses for an application resource, and {: .table .table-bordered .table-hover} | Sync state | Description |Display in Tree view | | -------------- | ---------- | ---------- | -| **Synced** | The live state of the resource on the cluster is identical to the desired state in Git.| {::nomarkdown}{:/} | -| **Syncing** | The live state of the resource was not identical to the desired state, and is currently being synced.| {::nomarkdown}{:/} | -| **Out-of-Sync** | {::nomarkdown}The live state is not identical to the desired state.
      To sync a resource, select the Sync option from the resource's context menu in Tree view. {:/}| {::nomarkdown}{:/} | -| **Unknown** | The sync status could not be determined. | {::nomarkdown}{:/} | + +| **Synced** | The live state of the resource on the cluster is identical to the desired state in Git.| {::nomarkdown}{:/} | +| **Syncing** | The live state of the resource was not identical to the desired state, and is currently being synced.| {::nomarkdown}{:/} | +| **Out-of-Sync** | {::nomarkdown}The live state is not identical to the desired state.
      To sync a resource, select the Sync option from the resource's context menu in Tree view. {:/}| {::nomarkdown}{:/} | +| **Unknown** | The sync status could not be determined. | {::nomarkdown}{:/} | + > The application header displays the statuses of the current and previous sync operations. Clicking **More** opens the Sync panels with Sync Info, Sync Result and Commit Info. The Application Warnings/Errors panel surfaces sync errors on exceeding the maximum number of retries and when a sync operation extends beyond 30 minutes. -#### Manifests for application resources +### Manifests for application resources In either Tree or List views, double-click an application resource to see its manifests. The manifests are displayed in the Summary tab. > Based on the selected resource type, you can also view logs, and events. Endpoints for example show only manifests, while pods show manifests, logs, and events. @@ -519,7 +532,7 @@ Here's what you can see and do in the Summary tab:
      {:/} -#### Logs for application resources +### Logs for application resources In either Tree or List views, double-click an application resource to see its logs. Logs are available only for resource types such as pods. {% include @@ -541,7 +554,7 @@ max-width="50%"
      {:/} -#### Events for application resources +### Events for application resources In either Tree or List views, double-click an application resource to see events in the Events tab. > If your runtime is lower than the version required to view events, you are notified to upgrade to the required version. @@ -563,7 +576,7 @@ max-width="50%" -### Monitor deployments for selected application +## Monitor deployments for selected application Monitor an ongoing deployment for the selected application, and review its historical deployments. The Timeline tab displays the history of deployments for the selected application, sorted by the most recent deployment (default), labeled **Current Version** at the top. @@ -600,7 +613,7 @@ caption="Applications Dashboard: Deployment chart" max-width="30%" %} -#### Monitor CI details by deployment +### Monitor CI details by deployment Each deployment record displays the complete CI history for that deployment. @@ -611,7 +624,7 @@ Each deployment record displays the complete CI history for that deployment. * The **Committer** who made the changes. -#### Monitor updated resources by deployment +### Monitor updated resources by deployment Each deployment record also identifies the resources that were changed (created, updated, or removed) as part of that deployment in **Updated Resources**. You can trace the history of a resource, from the original to their final versions. For each version, you can see the actual change or changes through the Diff view. The Full View shows the complete resource manifest, with the diff view of the changes, while the Compact View shows only those lines with the changes. > For detailed information on the current state of a resource, switch to the Current State tab and click the resource node. See [Monitoring application resources](#monitoring-application-resources). @@ -657,15 +670,15 @@ max-width="70%" -#### Monitor rollouts by deployment +### Monitor rollouts by deployment A rollout is initiated when there is an Argo CD sync due to a change in the desired state. Visualize ongoing and completed rollouts by deployments in **Services**. -> To view and manage a rollout, you must have an Argo `rollout` resource defined for your application, and [install Argo Rollouts in the cluster]({site.baseurl}}/docs/_docs/deployment/install-argo-rollouts). +> To view and manage a rollout, you must have an Argo `rollout` resource defined for your application, and [install Argo Rollouts in the cluster]({site.baseurl}}/docs/deployments/gitops/install-argo-rollouts). For detailed information on Argo Rollouts, see [Argo Rollouts documentation](https://argoproj.github.io/argo-rollouts/){:target="\_blank"}. -##### Rollout progress +#### Rollout progress For an ongoing rollout, the rollout bar displays the progress of the rollout. You can also visualize the steps in the rollout, and control the rollout using the options in the Rollout Player. Here is an example of an ongoing rollout for a canary deployment in Updated Services. The rollout comprising four steps has not started, and no traffic has not been routed as yet to the new version of the application. @@ -693,7 +706,7 @@ caption="Rollout completed for deployment" max-width="50%" %} -##### Manage ongoing rollout +#### Manage ongoing rollout Click the rollout name to visualize its steps. Manually manage the rollout through the controls in the Rollout Player. 
Here you can see that two out of four steps have been completed, 25% of the traffic has been routed, and the rollout has been paused for the defined length of time. @@ -720,7 +733,7 @@ The table lists the controls in the Rollout Player to manage an ongoing rollout. -##### View analysis run +#### View analysis run If you have defined an analysis template for the rollout, you can check the run results and the manifest. The result of an analysis run determines if the rollout is completed, paused, or aborted. For detailed information, see the [Analysis section in Argo Rollouts](https://argoproj.github.io/argo-rollouts/features/analysis/){:target="\_blank"}. @@ -749,7 +762,7 @@ max-width="50%" %} -### Monitor services for selected application +## Monitor services for selected application The Services tab shows the K8s services for each deployment of the application. Each service shows the number of replicas, the endpoint IP, the labels that reference the application, and the health status. @@ -765,6 +778,11 @@ caption="Applications Dashboard: Services tab" max-width="50%" %} +## Related articles +[Creating GitOps applications]({{site.baseurl}}/docs/deployments/gitops/create-application) +[Managing GitOps applications]({{site.baseurl}}/docs/deployments/gitops/manage-applications) +[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) +[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) diff --git a/_docs/deployment/create-application.md b/_docs/deployments/gitops/create-application.md similarity index 96% rename from _docs/deployment/create-application.md rename to _docs/deployments/gitops/create-application.md index 47aea2d77..274251725 100644 --- a/_docs/deployment/create-application.md +++ b/_docs/deployments/gitops/create-application.md @@ -1,7 +1,8 @@ --- -title: "Creating applications" +title: "Creating GitOps applications" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -19,7 +20,7 @@ Codefresh provides all the options and functionality to create and manage Argo C * Edit and delete applications Once the application is created and synced to the cluster, it is displayed in the Applications dashboard. Here, you can select an application to update the application's configuration settings, or delete it. - To monitor the health and sync status, deployments, and resources for the application, see [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/). + To monitor the health and sync status, deployments, and resources for the application, see [Monitoring GitOps applications]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/). ### Application: Definitions Application definitions include the name, runtime, and the name of the YAML manifest. By default, the YAML manifest has the same name as that of the application. 
@@ -225,7 +226,7 @@ Track the application in the [Applications dashboard](https://g.codefresh.io/2.0 ### Related articles -[Monitoring applications]({{site.baseurl}})/docs/deployment/applications-dashboard) -[Managing applications]({{site.baseurl}})/docs/deployment/manage-applications) -[Home dashboard]({{site.baseurl}})docs/reporting/home-dashboard) -[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) \ No newline at end of file +[Monitoring GitOps applications]({{site.baseurl}})/docs/deployments/gitops/applications-dashboard) +[Managing GitOps applications]({{site.baseurl}})/docs/deployments/gitops/manage-applications) +[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) +[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) \ No newline at end of file diff --git a/_docs/deployment/images.md b/_docs/deployments/gitops/images.md similarity index 92% rename from _docs/deployment/images.md rename to _docs/deployments/gitops/images.md index d4538a528..ebe6af780 100644 --- a/_docs/deployment/images.md +++ b/_docs/deployments/gitops/images.md @@ -1,11 +1,12 @@ --- title: "Images in Codefresh" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- -Building Docker images is one of the most basic requirements for creating Delivery Pipelines. +Building Docker images is one of the most basic requirements for creating Codefresh pipelines and Argo Workflows. Once you create an image, push the image to a registry, and report it to Codefresh, image information is continually updated in the Images page. ### Requirements for Images in Codefresh @@ -18,7 +19,7 @@ Complete the mandatory steps to see your Images in the Codefresh UI. Each step h 1. (Mandatory) Report image information to Codefresh. See the [report-image-info](https://github.com/codefresh-io/argo-hub/blob/main/workflows/codefresh-csdp/versions/0.0.6/docs/report-image-info.md){:target="\_blank"} example. -> If you are using an external GitHub Actions-based pipeline, we have a new template that combines image reporting and enrichment. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). +> If you are using an external GitHub Actions-based pipeline, we have a new template that combines image reporting and enrichment. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/gitops/image-enrichment-overview/). ### Image views in Codefresh * In the Codefresh UI, go to [Images](https://g.codefresh.io/2.0/images){:target="\_blank"}. @@ -111,3 +112,10 @@ Selecting **more details** for an image tag. | **3** | The Git details for this image tag, such as the Git hash, the Jira issue number, Git Pull Request, commit information, the name of the user who performed the commit. | | **4** | The workflow for the image step. Select to go to the Workflow.| | **5** | The log information for the build image step in the relevant workflow. Select to view Logs panel. 
| + +## Related articles + +[Creating GitOps applications]({{site.baseurl}}/docs/deployments/gitops/create-application) +[Managing GitOps applications]({{site.baseurl}}/docs/deployments/gitops/manage-applications) +[Image enrichment with integrations]({{site.baseurl}}/integrations/image-enrichment-overview) +[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) diff --git a/_docs/deployment/install-argo-rollouts.md b/_docs/deployments/gitops/install-argo-rollouts.md similarity index 74% rename from _docs/deployment/install-argo-rollouts.md rename to _docs/deployments/gitops/install-argo-rollouts.md index 22f6afb73..0847b58c3 100644 --- a/_docs/deployment/install-argo-rollouts.md +++ b/_docs/deployments/gitops/install-argo-rollouts.md @@ -1,12 +1,13 @@ --- -title: "Install Argo Rollouts" +title: "Progressive delivery with GitOps" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- -Install Argo Rollouts on managed clusters with a single click. With Argo Rollouts installed on your cluster, you can visualize rollout progress for deployed applications in the [Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/#rollout-progress-visualization). +Install Argo Rollouts on managed clusters with a single click. With Argo Rollouts installed on your cluster, you can visualize rollout progress for deployed applications in the [Applications dashboard]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/#rollout-progress-visualization). If Argo Rollouts has not been installed, an **Install Argo Rollouts** button is displayed on selecting the managed cluster. 1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. @@ -24,4 +25,4 @@ If Argo Rollouts has not been installed, an **Install Argo Rollouts** button is %} ### Related articles -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) \ No newline at end of file +[Add external clusters to runtimes]({{site.baseurl}}/docs/installation/managed-cluster/) \ No newline at end of file diff --git a/_docs/deployment/manage-application.md b/_docs/deployments/gitops/manage-application.md similarity index 96% rename from _docs/deployment/manage-application.md rename to _docs/deployments/gitops/manage-application.md index 2ccd3e16e..15a0049d9 100644 --- a/_docs/deployment/manage-application.md +++ b/_docs/deployments/gitops/manage-application.md @@ -1,7 +1,8 @@ --- -title: "Managing applications" +title: "Managing GitOps applications" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -49,8 +50,8 @@ Update General or Advanced configuration settings for a deployed application thr {:start="3"} 1. Update the **General** or **Advanced** configuration settings as needed: - [General configuration]({{site.baseurl}}/docs/deployment/create-application/#application-general-configuration-settings) - [Advanced configuration]({{site.baseurl}}/docs/deployment/create-application/#application-advanced-configuration-settings) + [General configuration]({{site.baseurl}}/docs/deployments/gitops/create-application/#application-general-configuration-settings) + [Advanced configuration]({{site.baseurl}}/docs/deployments/gitops/create-application/#application-advanced-configuration-settings) When you change a setting, the Commit and Discard Changes buttons are displayed. 
 {% include
@@ -218,7 +219,7 @@ For example, if you made changes to `api` resources or `audit` resources, type `
 Delete an application from Codefresh. Deleting an application deletes the manifest from the Git repository, and then from the cluster where it is deployed. When deleted from the cluster, the application is removed from the Applications dashboard in Codefresh.
 >**Prune resources** in the application's General settings determines the scope of the delete action.
-When selected, both the application and its resources are deleted. When cleared, only the application is deleted. For more information, review [Sync settings]({{site.baseurl}}/docs/deployment/create-application/#sync-settings).
+When selected, both the application and its resources are deleted. When cleared, only the application is deleted. For more information, review [Sync settings]({{site.baseurl}}/docs/deployments/gitops/create-application/#sync-settings).
 Codefresh warns you of the implication of deleting the selected application in the Delete form.
 1. In the Codefresh UI, go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard/list){:target="\_blank"}.
@@ -351,10 +352,9 @@ The table describes the options for the `Rollout` resource.
 ### Related articles
-[Creating applications]({{site.baseurl}}/docs/deployment/create-application)
+[Creating GitOps applications]({{site.baseurl}}/docs/deployments/gitops/create-application)
 [Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard)
-[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics)
-
+[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics)
diff --git a/_docs/deployments/helm/custom-helm-uploads.md b/_docs/deployments/helm/custom-helm-uploads.md
new file mode 100644
index 000000000..33aa06d54
--- /dev/null
+++ b/_docs/deployments/helm/custom-helm-uploads.md
@@ -0,0 +1,125 @@
+---
+title: "Creating and uploading Helm packages"
+description: "Manually create and upload Helm packages"
+group: deployments
+sub_group: helm
+redirect_from:
+  - /docs/create-helm-artifacts-using-codefresh-pipeline/
+toc: true
+---
+
+Helm packages are just TAR files. Helm repositories are simple file hierarchies with an extra [index.yaml](https://helm.sh/docs/developing_charts/#the-chart-repository-structure){:target="\_blank"}.
+You can run custom commands and manually upload indexes and packages to a Helm repo.
+
+>This article shows some non-standard Helm examples.
+ For the basic use cases, or if you are just getting started with Helm, see our [Helm quick start guide]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) and [Using Helm in pipelines]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/).
+
+## Package a Helm chart
+Below is an example of a freestyle step in a Codefresh pipeline that packages the Helm chart and then extracts the path of the generated package from the command output. It also saves that package path in an environment variable for later use.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+helm_package:
+  image: devth/helm
+  commands:
+    - cf_export PACKAGE=$(helm package <chart-dir> | cut -d " " -f 8)
+{% endraw %}
+{% endhighlight %}
+
+The `helm package` command expects a path to an unpacked chart. Replace `<chart-dir>` in the example with the directory that holds your chart files. Note that this directory must have the same name as the chart name, as per Helm requirements.
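+
+For instance, assuming a hypothetical chart directory named `my-chart`, running `helm package ./my-chart` prints a single line similar to the output below. The eighth space-separated field of that line is the path of the generated archive, which is why the step above pipes the output through `cut -d " " -f 8` (the chart name, version, and path shown here are illustrative):
+
+{% highlight shell %}
+{% raw %}
+# Example output of "helm package ./my-chart"
+Successfully packaged chart and saved it to: /codefresh/volume/my-chart-0.1.0.tgz
+{% endraw %}
+{% endhighlight %}
+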
+See [Helm package docs](https://github.com/kubernetes/helm/blob/master/docs/helm/helm_package.md){:target="_blank"} and [Helm charts overview](https://github.com/kubernetes/helm/blob/master/docs/charts.md){:target="_blank"} for more information.
+
+{{site.data.callout.callout_info}}
+To use `cf_export` and make the variable available to other steps in the pipeline, see [Variables in pipelines]({{site.baseurl}}/docs/pipelines/variables).
+{{site.data.callout.end}}
+
+## Example 1: Push the chart to a GCS-based Helm repository
+The first example pushes the packaged chart into a public cloud storage service, like AWS S3, Azure Storage, or Google Cloud Storage. We chose Google Cloud Storage (GCS) for this example.
+Our pipeline has three steps:
+
+{:start="1"}
+1. download_index: download the Helm `index.yaml` file from GCS, or create one if it's not there.
+
+{:start="2"}
+2. helm_package_merge: package the chart as described earlier, and also merge the new package into the downloaded `index.yaml` file, using the `helm repo index --merge` command.
+
+{:start="3"}
+3. push_gcs: upload the updated `index.yaml` file and the newly created package to GCS.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+steps:
+  download_index:
+    image: appropriate/curl:latest
+    commands:
+      - 'curl https://storage.googleapis.com/$GOOGLE_BUCKET_NAME/index.yaml --output ./index.yaml --fail || :'
+      - '[ ! -f ./index.yaml ] && echo "apiVersion: v1">./index.yaml'
+  helm_package_merge:
+    image: devth/helm
+    commands:
+      - cf_export PACKAGE=$(helm package <chart-dir> | cut -d " " -f 8)
+      - helm repo index --merge ./index.yaml .
+  push_gcs:
+    image: camil/gsutil
+    commands:
+      - echo -E $GOOGLE_CREDENTIALS > /gcs-creds.json
+      - echo -e "[Credentials]\ngs_service_key_file = /gcs-creds.json\n[GSUtil]\ndefault_project_id = $GOOGLE_PROJECT_ID" > /root/.boto
+      - gsutil cp ./index.yaml gs://$GOOGLE_BUCKET_NAME
+      - gsutil cp $PACKAGE gs://$GOOGLE_BUCKET_NAME
+{% endraw %}
+{% endhighlight %}
+
+
+### Environment setup
+
+This pipeline references some predefined environment variables such as `GOOGLE_BUCKET_NAME`, `GOOGLE_PROJECT_ID` and `GOOGLE_CREDENTIALS`.
+For this example, we created a service account with appropriate permissions in Google Cloud, and saved the credentials into `GOOGLE_CREDENTIALS` as a Codefresh Secret.
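+
+For reference, the two `echo` commands in the `push_gcs` step simply write out the service account key file and a minimal `gsutil` configuration. Assuming a hypothetical project ID of `my-project-123456`, the generated `/root/.boto` file would look like this:
+
+{% highlight ini %}
+{% raw %}
+[Credentials]
+gs_service_key_file = /gcs-creds.json
+[GSUtil]
+default_project_id = my-project-123456
+{% endraw %}
+{% endhighlight %}
+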
      +For more information, see: +[Authenticating with Google services](https://cloud.google.com/storage/docs/authentication#service_accounts){:target="_blank"}.
+[Codefresh pipeline configuration and secrets](https://codefresh.io/docs/docs/codefresh-yaml/variables/#user-provided-variables){:target="_blank"}.
+
+## Example 2: Push the chart to Chart Museum
+Chart Museum is a Helm repository *server* that has an HTTP API, pluggable backends, authentication, and more.
+Read more about [Chart Museum](https://github.com/kubernetes-helm/chartmuseum){:target="_blank"}.
+
+In this example, we already have a Chart Museum server running, so we'll push the packaged chart to it.
+
+The steps will be:
+
+{:start="1"}
+1. helm_package: package the chart as described earlier.
+
+{:start="2"}
+2. get_repo_url: To avoid hard-coding the repository URL into the pipeline, we will retrieve it from the Codefresh Helm integration.
+In this case, we have added our repository with Codefresh as described in [Using external Helm repos in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories).
+Replace `<repo-name>` in the example with the name you gave to your repository when you added it to Codefresh.
+
+{:start="3"}
+3. helm_push: call the Chart Museum HTTP API to upload the package. Chart Museum will take care of the rest.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+steps:
+  helm_package:
+    image: devth/helm
+    commands:
+      - cf_export PACKAGE=$(helm package <chart-dir> | cut -d " " -f 8)
+  get_repo_url:
+    image: codefresh/cli:latest
+    commands:
+      - cf_export HELM_URL=$(codefresh get ctx <repo-name> -o=yaml | grep repositoryUrl | cut -d "'" -f 2)
+  helm_push:
+    image: appropriate/curl
+    commands:
+      - curl --data-binary "@$PACKAGE" $HELM_URL/api/charts
+{% endraw %}
+{% endhighlight %}
+
+
+## Related articles
+[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/)
+[Using a managed Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/)
+[Helm environment promotion]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion)
diff --git a/_docs/deployments/helm/helm-charts-and-repositories.md b/_docs/deployments/helm/helm-charts-and-repositories.md
new file mode 100644
index 000000000..5973f4147
--- /dev/null
+++ b/_docs/deployments/helm/helm-charts-and-repositories.md
@@ -0,0 +1,111 @@
+---
+title: "Using external Helm repos in Codefresh pipelines"
+description: "Use external Helm Charts and repositories in Codefresh pipelines"
+group: deployments
+sub_group: helm
+toc: true
+---
+Codefresh allows you to integrate with external Helm repositories and Helm charts in the Helm Charts page.
+Using external Helm repositories is optional, as all Codefresh accounts already include a [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/).
+
+## Add an external Helm repository
+
+Easily add your own Helm charts.
+By default, we show charts from the [official Helm repository](https://github.com/kubernetes/charts){:target="_blank"}.
+
+1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Helm Charts**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}.
+1. On the top right, click **Add Existing Helm Repository**.
+   You are taken to Pipeline Integrations.
+1. In the Integrations page, click **Add Helm Repository**, and then select the type of Helm repo to add from the list.
+1. Enter the **Helm repository name** and **URL**.
+   Do not include the specific path to `index.yaml` in the URL.
+ +{% include image.html +lightbox="true" +file="/images/deployments/helm/quick-helm-integration.png" +url="/images/deployments/helm/quick-helm-integration.png" +alt="Adding a Helm repository" +caption="Adding a Helm repository" +max-width="70%" +%} + +1. If your repository doesn't require authentication, to complete the process, click **Save**. + +For more details on adding Helm repositories, see [Helm integrations]({{site.baseurl}}/docs/integrations/helm/). + +## Use a Helm repository in a Codefresh pipeline + +Once connected, inject any Helm repository context into Codefresh pipelines. + +1. From the Pipelines page, select the pipeline into which to import the Helm configuation. +1. In the Workflows tab, do one of the following: + * Click **Variables** on the right, and then click the **Settings** (gear) icon. + * Click the context menu next to the settings icon. +1. Click on **Import from/Add shared configuration**, and select the name of the repository. + The repository settings are injected as environment variables into the pipeline. + +{% include image.html +lightbox="true" +file="/images/deployments/helm/connect-helm-repo.png" +url="/images/deployments/helm/connect-helm-repo.png" +alt="Connecting a Helm repository in the pipeline" +caption="Connecting a Helm repository in the pipeline" +max-width="70%" +%} + +1. If you are using the Helm step, the step uses these settings to connect to your authenticated repository automatically. For details, see [Using Helm in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + +## Install a chart from your Helm repository +Install a chart from a Helm repository to your cluster. + +* Values in the Chart Install wizard are provided in the following order: + 1. Chart Default Values (implicitly part of the chart). + 2. Overridden default values (provided as values file, provided only if edited by the user). + 3. Supplied values files from Yaml Shared Configuration. + 4. Override variables are provided as `--set` arguments. +* Variables available for custom pipelines: + If you select a custom pipeline, the following variables are available: + * `CF_HELM_RELEASE` - name of release + * `CF_HELM_KUBE_CONTEXT` - kubectl context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/#work-with-your-services)) + * `CF_HELM_INSTALLATION_NAMESPACE` - desired namespace for the release + * `CF_HELM_CHART_VERSION` - Chart Version, + * `CF_HELM_CHART_NAME` - Chart Name + * `CF_HELM_CONTEXTS` - values from [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/#using-shared-helm-values) + * `CF_HELM_VALUES` - extra values + * `CF_HELM_SET` - extra values, + * `CF_HELM_CHART_REPO_URL` - URL of Chart repository + * `CF_HELM_COMMIT_MESSAGE` - Message to show in Helm GUI, + +
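+
+As an illustration only, here is a minimal sketch of a custom install pipeline that consumes the variables listed above. The `codefresh/kube-helm` image and the exact `helm` flags are assumptions to adapt to your own setup, and the sketch assumes the chart repository is reachable without additional authentication:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  install_chart:
+    title: Installing the chart selected in the wizard
+    image: codefresh/kube-helm  # assumed image that bundles helm and kubectl
+    commands:
+      # Kubeconfig that Codefresh prepares for connected clusters
+      - export KUBECONFIG="${CF_KUBECONFIG_PATH}"
+      # All CF_HELM_* variables below are injected by the Chart Install wizard
+      - helm upgrade --install "${CF_HELM_RELEASE}" "${CF_HELM_CHART_NAME}" --repo "${CF_HELM_CHART_REPO_URL}" --version "${CF_HELM_CHART_VERSION}" --namespace "${CF_HELM_INSTALLATION_NAMESPACE}" --kube-context "${CF_HELM_KUBE_CONTEXT}"
+{% endraw %}
+{% endhighlight %}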
+
+**Before you begin**
+* Make sure that you have a Kubernetes integration with the cluster and namespace, as described [here]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/)
+
+**How to**
+1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Helm Charts**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}.
+1. In the row with the chart to install, click **Install**.
+1. Enter the **Release name** for the chart, and select the **Chart version** to install.
+1. From Cluster Information, select a Kubernetes **Cluster** and the **Namespace** to install to.
+1. Select the **Pipeline** to use for the installation.
+1. If required, edit the **Default Chart Values** to view and override them.
+   When the default values YAML is changed, it is provided to Helm install as a values file. You can revert to the original values by clicking **Revert**.
+1. To provide additional values files, do the following:
+    * From the **Import from configuration** list, select **Add new context of type: YAML**.
+    * Enter the **Context name**.
+    * Insert your values YAML, and click **Save**.
+      The YAML is saved and added to the list of configuration files that you can import from.
+1. To override variable values, click **+Add variable**, and then enter the Key and Value.
+   > The order of value configurations matters for Helm: most recently provided values override earlier ones.
+1. Click **Install**. You can observe the newly installed release in Helm Releases.
+
+You can also install Helm releases from [any Helm environment board]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion).
+
+
+## Related articles
+[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/)
+[Helm integrations]({{site.baseurl}}/docs/integrations/helm/)
+[Helm Dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management)
+[Helm Promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion)
+[Helm best practices]({{site.baseurl}}/docs/ci-cd-guides/helm-best-practices/)
+
+
diff --git a/_docs/deployments/helm/helm-environment-promotion.md b/_docs/deployments/helm/helm-environment-promotion.md
new file mode 100644
index 000000000..21466e5dc
--- /dev/null
+++ b/_docs/deployments/helm/helm-environment-promotion.md
@@ -0,0 +1,290 @@
+---
+
+title: "Promoting Helm Environments"
+description: "Manage your Helm Environments with the Codefresh UI"
+group: deployments
+sub_group: helm
+toc: true
+---
+Apart from the [Helm Releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) that show your Kubernetes clusters at the application level, Codefresh also comes with a special environment board that allows you to track one or more applications as they move within your infrastructure (for example, Dev, QA, Prod).
+
+The environment board can function both as an overview of the whole lifecycle of the application and as a tool to shift Helm releases left or right between environments.
+ +Here is an example board: + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/board.png" +url="/images/deployments/helm/promotion/board.png" +alt="Helm Environment Dashboard" +caption="Helm Environment Dashboard" +max-width="80%" +%} + +This board has three environments that correspond to Kubernetes clusters: + * A Load-testing environment where applications are stress-tested + * A Staging environment where smoke tests are performed + * The Production environment where applications go live + +You can see that a Python example app at version 0.2.0 is already in production. Version 0.3.0 is waiting in the staging environment for smoke tests. Once it is tested it can be dragged to the production column therefore *promoting* it to production status. + + +## Using the Helm Environment Board + +You can create and manage as many Helm promotion boards as you want. +For each board, you define how many columns it will contain, where each column is a Helm-enabled Kubernetes cluster. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/helm-environments.png" +url="/images/deployments/helm/promotion/helm-environments.png" +alt="Helm environments column structure" +caption="Helm environments column structure" +max-width="80%" +%} + +You can use different clusters for each column or different namespaces from the same cluster. You can even mix and match both approaches. +As an example, you could create a Helm board with the following environments: + +* Column 1, dev cluster showing all namespaces (DEV) +* Column 2, namespace qa from cluster staging (QA) +* Column 3, namespace staging from cluster staging (STAGING) +* Column 4, namespace production from cluster prod (PRODUCTION) + +Once you have your columns in place, you can move Helm releases between clusters/namespaces by drag-n-drop. Each Helm release can be dragged to any other column either promoting it, for example, from QA to Production, or shifting it left, for example, from Production to QA. + +## Creating a custom Helm Board + +Create your own Helm board with a single or multiple Helm applications. You can create as many boards as you want. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Boards**](https://g.codefresh.io/helm/helm-kanban/){:target="\_blank"}. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/board-selection.png" +url="/images/deployments/helm/promotion/board-selection.png" +alt="Helm board selection" +caption="Helm board selection" +max-width="80%" +%} + +{:start="2"} +1. On the top-right, click **Add board**. +1. Enter the title of your board as the **Board Name**. +1. Optional. In the **Release name regex expression** field, enter the Regex expression for this board to filter all its environments to show only Helm releases that match this regular expression. + Regex expressions are very helpful if you want your environment board to focus only on a single or set of Helm applications. + To see all Helm releases of your clusters, leave empty. + +You can edit both options for an existing board if you change your mind later. + +### Define Clusters/Namespaces for each Environment + +Once you create your Helm environment board, you are ready to define its columns. + +* To add a column, on the top-right, click **Add environment***. 
+ You will see the environment details dialog: + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/edit-helm-environment.png" +url="/images/deployments/helm/promotion/edit-helm-environment.png" +alt="Edit Helm environment" +caption="Edit Helm environment" +max-width="50%" +%} + + For each environment you can select: + * A name for that column + * The Kubernetes cluster it corresponds to + * One or more namespaces that define this environment (You can even toggle the switch for a regex match) + * A custom pipeline that will be used when a Helm release is installed for the first time in this column + * A custom pipeline that will be used when a Helm release is dragged in this column (promoted from another column) + * Optional. One or more charts to use for the environment. Defining charts for the environment saves you from having to search through all the charts in your Helm repository. When you install an application from the install graphical dialog, only the selected chart(s) are displayed. + * A presentation color to easily identify the environment on the board (For example, a "production" environment should have a red color) + +You can also select no namespace at all. In that case, the column will show Helm releases for all namespaces in that cluster. +You can change all these options after creation, so feel free to change your mind. + +Repeat the same process for additional environments. Remember that you can name your environment as you want and define any combination of cluster/namespace for any of the columns. This gives you a lot of power to define a Helm environment board that matches exactly your own process. + +You don't have to define the environments in order. You can drag-n-drop columns to change their order after the initial creation. + + +### Installing Helm Releases on each Environment + +If you already have [pipelines that deploy Helm releases]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/), your columns are populated automatically with information. + +For each Helm release, you will get some basic details such as the chart version and the name of the release. You can expand a release by clicking on the arrow button to get additional information such as the docker images and the replicas of each pod that are contained in the release. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/expand.png" +url="/images/deployments/helm/promotion/expand.png" +alt="Helm release details" +caption="Helm release details" +max-width="50%" +%} + +You can even install manually a Helm release from any external repository by clicking on the *PLUS* button at the header of each column. In that case you will see a list of possible Helm applications to choose from. + +You will be able to select the target cluster and namespace as well as the chart values [as any other Helm release]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#install-chart-from-your-helm-repository). + + +## Moving Releases between Environments + +A Helm environment board can be used by different stakeholders in order to get the detailed status of all defined environments. In that aspect it can act as a read-only tool that simply shows the results of Codefresh pipelines that deploy Helm applications. + +### Promoting Helm Releases with the UI + +You can also use the board as an action tool in order to promote/demote a Helm release between individual environments. 
To move a Helm release between environments just drag-n-drop it to a different column. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/shift-right.png" +url="/images/deployments/helm/promotion/shift-right.png" +alt="Promoting a Helm release" +caption="Promoting a Helm release" +max-width="80%" +%} + +Once you drop the release you will also see the promotion dialog. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/promote-settings.png" +url="/images/deployments/helm/promotion/promote-settings.png" +alt="Promotion Settings" +caption="Promotion Settings" +max-width="40%" +%} + +All fields here will be auto-filled according to the Helm release that you dragged. You can also choose a custom pipeline (see below) for the promotion if you don't want to use the default one. + +By clicking the *Variables* button you can override the chart values, import a specific shared configuration or add new values. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/value-options.png" +url="/images/deployments/helm/promotion/value-options.png" +alt="Changing deployment values" +caption="Changing deployment values" +max-width="40%" +%} + +By default Codefresh will use a built-in install/upgrade pipeline for performing the promotion. You can choose your own pipeline from the promotion dialog. That pipeline will be automatically provided with the following [environment variables]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/#overriding-the-default-helm-actions): + +* `CF_HELM_RELEASE` - name of release +* `CF_HELM_KUBE_CONTEXT` - `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_NAMESPACE` - Tiller Namespace if you use Helm 2 +* `CF_HELM_INSTALLATION_NAMESPACE` - namespace where release is promoted to +* `CF_HELM_CONTEXTS` - [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration) Helm contexts +* `CF_HELM_VALUES` - Helm chart values +* `CF_HELM_SET` - Additional values there were overriden +* `CF_HELM_CHART_JSON_GZIP` - Gzipped JSON of Helm chart (only for Helm 3) +* `CF_HELM_CHART_JSON` - JSON of Helm chart (only for Helm 2) +* `CF_HELM_BOARD` - Name of the board that is used for the drag-n-drop-action +* `CF_HELM_TARGET_SECTION` - Name of the Source Environment that you are promoting from +* `CF_HELM_SOURCE_SECTION` - Name of the Target Environment that you are promoting to + + +Note that the variable `CF_HELM_CHART_JSON_GZIP` is both compressed and base64 encoded. To get the raw value you need a command like `echo $CF_HELM_CHART_JSON_GZIP | base64 -d | gunzip` + +>Overriding the default pipeline can only be done by [Codefresh admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators). + +Once you click the *update* button, a new build will run that will perform the deployment. + +Note that you can move releases to any column both on the right and on the left of the current column. This is helpful if for example you find a bug in your production environment and you want to bring it back to a staging environment for debugging. + +### Promoting Helm releases programmatically + +You can also promote Helm releases with the [Codefresh CLI](https://codefresh-io.github.io/cli/predefined-pipelines/promote-helm-release/){:target="\_blank"}. 
+ +Once you have [installed the CLI](https://codefresh-io.github.io/cli/getting-started/){:target="\_blank"}, you can use it from an external script or terminal with the `helm-promotion` parameter: + +{% highlight shell %} +{% raw %} +codefresh helm-promotion --board MySampleBoard --source Staging --target Production --source-release my-app --set myenv=prod +{% endraw %} +{% endhighlight %} + +Here we promote the Helm release `my-app` to the *Production* column overriding also the `myenv` value. + +Remember that the Codefresh CLI can also run in a Codefresh pipeline with a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +Here is an example of a Helm promotion from within a Codefresh pipeline. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + triggerstep: + title: trigger + image: codefresh/cli + commands: + - 'codefresh helm-promotion --board MySampleBoard --source Staging --target Production --source-release my-app --namespace my-namespace --set myenv=prod' +{% endraw %} +{% endhighlight %} + +## Viewing the promotion pipeline + +When you promote a Helm Release for a Board, you can view the pipeline for that release. + +1. Click on Boards under the Helm section on the left-hand side +2. Select the board you want to view +3. Select the Builds tab on the top +4. Here, you can see the Promotion Pipelines / builds for promoting a Release + +## Editing your Helm Boards + +For any existing Helm board, you have the following options: + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/board-management.png" +url="/images/deployments/helm/promotion/board-management.png" +alt="Editing a Helm environment" +caption="Editing a Helm environment" +max-width="80%" +%} + + +1. The refresh button will update the board with the current state of the clusters +1. The filtering menu can be used to further constrain the Helm releases shown in each column. +1. The *edit properties* button allows you to change again the title of the board as well as a global filter for Helm releases +1. The *remove board* completely deletes the present board from the Codefresh UI +1. The environment details on the environment header are: +* The edit button to change again the options for this column (shown on mouse hover) +* The delete button to remove this column from the board (shown on mouse hover) +* The plus button to install a new chart. If you selected one or more charts when you defined your environment, only the selected charts are displayed. +* A numeric value that shows how many releases are contained on this environment +1. The delete button allows you to uninstall a Helm release for an environment + +The filtering options allow you to further constrain the Helm release shown for the whole board. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/filter.png" +url="/images/deployments/helm/promotion/filter.png" +alt="Filtering options" +caption="Filtering options" +max-width="50%" +%} + +The filters are especially helpful in Helm boards with large numbers of environments and/or releases. 
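+
+To make the promotion variables listed earlier more concrete, here is a minimal, hypothetical custom promotion pipeline. It only logs the move and decodes the chart metadata; it is a starting point for your own logic (notifications, validations, a `helm upgrade`, and so on), not the built-in install/upgrade pipeline, and the `codefresh/kube-helm` image is an assumption:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  custom_promotion:
+    title: Custom promotion logic
+    image: codefresh/kube-helm  # assumed image with helm, kubectl and standard shell tools
+    commands:
+      # These variables are injected by the board when a release is dragged to a new column
+      - echo "Moving release ${CF_HELM_RELEASE} on board ${CF_HELM_BOARD} (source section ${CF_HELM_SOURCE_SECTION}, target section ${CF_HELM_TARGET_SECTION})"
+      # For Helm 3 the chart is passed as gzipped, base64-encoded JSON
+      - echo "${CF_HELM_CHART_JSON_GZIP}" | base64 -d | gunzip > /tmp/chart.json
+      - echo "Target namespace is ${CF_HELM_INSTALLATION_NAMESPACE} on context ${CF_HELM_KUBE_CONTEXT}"
+{% endraw %}
+{% endhighlight %}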
+ +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Using external Helml repos in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#add-helm-repository) +[Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) +[Environment Dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/environment-dashboard/) diff --git a/_docs/deployments/helm/helm-releases-management.md b/_docs/deployments/helm/helm-releases-management.md new file mode 100644 index 000000000..7f0154acd --- /dev/null +++ b/_docs/deployments/helm/helm-releases-management.md @@ -0,0 +1,263 @@ +--- +title: "Managing Helm releases" +description: "Manage Helm deployments from the Codefresh UI" +group: deployments +sub_group: helm +redirect_from: + - /docs/helm-releases-management/ + - /docs/deployments/helm/helm3/ +toc: true +--- +Codefresh has built-in integration for Helm that provides a unique view into your production Kubernetes cluster. +In Helm Releases, you can see the current status of your cluster, including the currently deployed releases, their previous revisions including change tracking, and even roll back to a previous release. + +Codefresh also offers [an environment view for Helm releases]({{site.baseurl}}/docs/deploy-to-kubernetes/environment-dashboard/) as well as [a promotion dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion). + + +## View Helm releases and release information + +View all the Helm releases in your cluster, and drill down into a specific release to see its services, deployed versions, manifests and more. + +> Make sure you have [connected your Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/adding-non-gke-kubernetes-cluster/) to Codefresh. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Releases**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/helm-release-dashboard.png" +url="/images/deployments/helm/helm-release-dashboard.png" +alt="Helm Releases" +caption="Helm Releases" +max-width="90%" +%} + + + + +{:start="2"} +1. To see details for a specific release, click the release name. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/services.png" +url="/images/deployments/helm/services.png" +alt="Kubernetes Services" +caption="Kubernetes Services" +max-width="70%" +%} + +The History tab shows all previous releases. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/history.png" +url="/images/deployments/helm/history.png" +alt="Helm History" +caption="Helm History" +max-width="60%" +%} + +You can further expand a release revision to see exactly what files were changed in this release. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/diff.png" +url="/images/deployments/helm/diff.png" +alt="Helm diff" +caption="Helm diff" +max-width="60%" +%} + +There are other tabs that show you the chart used, the values as well as the final manifests that were actually deployed. 
+ +{% include +image.html +lightbox="true" +file="/images/deployments/helm/manifests.png" +url="/images/deployments/helm/manifests.png" +alt="Final rendered manifests" +caption="Final rendered manifests" +max-width="50%" +%} + +## Add labels to Kubernetes services + +For better visibility into services, add the [recommended labels](https://helm.sh/docs/topics/chart_best_practices/labels/){:target="\_blank"} to your Kubernetes service. + +{% highlight yaml %} +{% raw %} + apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + app.kubernetes.io/name: "{{ template "name" . }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" +{% endraw %} +{% endhighlight %} + +To use the instance label for something different, you can also use a release label instead: + +{% highlight yaml %} +{% raw %} +release: {{ .Release.Name }} +{% endraw %} +{% endhighlight %} + + + +## Add an upgrade message + +Codefresh allows you to display a meaningful description for each release in the release history. This message +can help show the main reason behind each release, or any other message that is convenient for you. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/helm-commit-message.png" +url="/images/deployments/helm/helm-commit-message.png" +alt="Helm release message" +caption="Helm release message" +max-width="70%" +%} + +You can set this message for your Helm release in three ways: + +1. When you manually install a Helm release from the [Helm charts screen]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#install-chart-from-your-helm-repository), there is a field for this message. +1. Set the property `commit_message` inside the [notes.txt](https://helm.sh/docs/chart_template_guide/notes_files/){:target="\_blank"} file of your chart. +1. By providing an environment variable called `COMMIT_MESSAGE` within your [pipeline Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + + +## Roll back a Helm release + +You can rollback to a previous revision of a release in the History tab. + +1. Click the Helm release for which to perform a rollback, and then click the **History** tab. +1. To rollback to a specific release, click **Rollback** in the row. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/rollback.png" +url="/images/deployments/helm/rollback.png" +alt="Rolling back to a previous release" +caption="Rolling back to a previous release" +max-width="50%" +%} + +>It takes time to complete rollback for a release, and the change in the cluster is not instantly updated in the Codefresh UI. If you also use a [custom rollback pipeline](#overriding-the-default-helm-actions), the delay between the cluster update and the UI refresh is even longer. + +## Helm UI actions + +From the main release screen, you have some additional actions. + +You can issue a [Helm test](https://github.com/kubernetes/helm/blob/master/docs/chart_tests.md) by clicking on the 'Run Test' button on the desired chart row. + +You can delete a release by clicking on the 'Delete' button on the desired chart row. +For deletion options, see the [helm delete documentation](https://github.com/kubernetes/helm/blob/master/docs/helm/helm_delete.md){:target="\_blank"}, for example, *purge* will remove the revision from the release history. 
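+
+Relating to the upgrade message described above, here is a minimal sketch of providing the `COMMIT_MESSAGE` variable from a pipeline Helm step. The chart path, release name, and context are placeholders, and passing the variable through the step's `environment` block is an assumption; check the Helm step reference for the exact mechanism used in your pipelines:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+deploy_with_message:
+  type: helm
+  arguments:
+    action: install
+    chart_name: charts/my-app            # placeholder chart path
+    release_name: my-app                 # placeholder release name
+    kube_context: my-kubernetes-context  # placeholder cluster name as configured in Codefresh
+  environment:
+    # Shown as the release description in the History tab (assumed mechanism)
+    - COMMIT_MESSAGE=Updated backend to fix login timeouts
+{% endraw %}
+{% endhighlight %}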
+ +## Helm deployment badge + +Similar to a [build badge]({{site.baseurl}}/docs/pipelines/build-status/#using-the-build-badge), you can also get a deployment badge for a Helm release. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Releases**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. +1. In the row with the Helm release for which to add a deployment badge, click the **Settings** (gear) icon. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/helm-badge.png" +url="/images/deployments/helm/helm-badge.png" +alt="Helm Deployment badge" +caption="Helm Deployment badge" +max-width="60%" +%} + +{:start="3"} +1. To get deployment information, click **Badge**. + Codefresh provides the Markdown/HTML/Link segment that you can embed in README or other documents to show deployment information. + +## Overriding default Helm actions for releases + +By default, when you take an action in the UI, Codefresh executes the native Helm command corresponding to that action: + +* `helm test` for testing a chart +* `helm rollback` for rollbacks +* `helm delete` or `helm uninstall --keep-history` for delete +* `helm delete --purge ` or `helm uninstall ` for purging a release + +You can override these actions for a specific Helm release by defining custom pipelines for each action. This way you can add your extra logic on top of these actions. For example your own Helm uninstall pipeline might also have a notification step that posts a message to a Slack channel after a release is removed. + +>Only [Codefresh admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators) can override the default pipelines defined for a Helm release. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Releases**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. +1. In the row with the Helm release for which to override default actions, click the **Settings** (gear) icon. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/override-helm-actions.png" +url="/images/deployments/helm/override-helm-actions.png" +alt="Changing default Helm actions" +caption="Changing default Helm actions" +max-width="50%" +%} + +{:start="3"} +1. Select the pipeline to use for the respective actions. + +### Environment variables for custom Helm commands +If you do override any of these actions, the following [environment variables]({{site.baseurl}}/docs/codefresh-yaml/variables/) are available in the respective pipeline, so that you can use your own custom Helm command. 
+ +**Helm Test pipeline** +* `CF_HELM_RELEASE`: Name of release +* `CF_HELM_KUBE_CONTEXT`: `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_NAMESPACE`: Namespace where release is stored +* `CF_HELM_TIMEOUT`: Time in seconds to wait for any individual Kubernetes operation +* `CF_HELM_CLEANUP`: Delete test pods upon completion + + + +**Helm Rollback pipeline** +* `CF_HELM_VERSION`: Helm version, ex.: 3.0.1, 2.7.0 +* `CF_HELM_RELEASE`: Name of release on cluster +* `CF_HELM_REVISION`: Revision to use for rollback +* `CF_HELM_KUBE_CONTEXT`: `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_NAMESPACE`: Namespace where release is stored + + +**Helm Delete pipeline** +* `CF_HELM_PURGE`: Boolean, delete release from store +* `CF_HELM_RELEASE`: Name of release +* `CF_HELM_TIMEOUT`: Time in seconds to wait for any individual Kubernetes operation +* `CF_HELM_HOOKS`: Prevent hooks from running during install +* `CF_HELM_KUBE_CONTEXT`: `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_VERSION`: Helm version, ex.: 3.0.1, 2.7.0 +* `CF_HELM_NAMESPACE`: Namespace where release is stored + + +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Helm charts and repositories]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/) +[Codefresh-managed Helm Repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +[Helm promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) \ No newline at end of file diff --git a/_docs/deployments/helm/managed-helm-repository.md b/_docs/deployments/helm/managed-helm-repository.md new file mode 100644 index 000000000..6b27d3ade --- /dev/null +++ b/_docs/deployments/helm/managed-helm-repository.md @@ -0,0 +1,137 @@ +--- +title: "Using a managed Helm repository" +description: "Use the Codefresh integrated Helm repository" +group: deployments +sub_group: helm +toc: true +--- + +Codefresh provides fully managed, hosted Helm repositories for users. +While we automatically create a default managed repo for every Codefresh account, you can also add [external Helm repositories]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/). + +The built-in Helm repo that Codefresh creates, is private by default, allowing access only via Codefresh or via a Codefresh API token. + +> Tip: + You may be familiar with the popular open source Helm repository implementation called 'ChartMuseum', that Codefresh sponsors. Codefresh-managed repositories are based on, and therefore compatible with, ChartMuseum and its unique features. For details, see [ChartMuseum](https://github.com/kubernetes-helm/chartmuseum){:target="\_blank"}. + +## View Helm repository integrations + +The Codefresh-managed Helm repo is displayed with other Helm repositories you have added to Helm integrations. + +>You cannot delete the built-in Helm repo that Codefresh creates for you. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select **Pipeline Integrations**. +1. Scroll to **Helm Repositories**, and then click **Configure**. 
+ All the Helm integrations you set up are displayed. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/managed-helm-repo.png" +url="/images/deployments/helm/managed-helm-repo.png" +alt="Codefresh built-in Helm repository" +caption="Codefresh built-in Helm repository" +max-width="50%" +%} + + +## Get the chart repository URL +Get the chart repository URL for any Helm integration. +The URL is in the format: `cm://h.cfcr.io//`, where the default repo is `default`. + +* From the list of Helm integrations, select the integration and then click the **Edit** icon on the left. + The Helm Repository URL field displays the chart URL. + +## Codefresh Helm dashboards + +The Helm Charts and Helm Releases dashboards are automatically configured to work with your default managed repo to easily install charts and manage releases. +For more information, see [install chart from a Helm repository]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#install-chart-from-your-helm-repository) and [Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/). + +## Use Codefresh CLI for advanced Helm repo management + +The Codefresh CLI supports advanced management options for your managed repository, without having to log in to the Codefresh UI. +For more information on CLI support for Helm repos, see the [CLI documentation on Helm Repos](https://codefresh-io.github.io/cli/helm-repos/){:target="\_blank"}. + + +## Set access level for managed repo + +The managed Helm repository supports two modes of access: +* Private +* Public + +By default, the managed Helm repo is created with `Private` access, meaning that read/write access is protected by Codefresh authentication. + +You can switch the access level to `Public`, which will make the repository accessible to anonymous users only *for read operations*. Write operations, even in public access mode, always require authentication. +Be very careful when you make your repo public, as the whole world will be able to access your charts. We recommend this setting only for quick demos and POCs. + +**How to** + +* Use the Codefresh CLI to toggle access level on a managed repo: + +{% highlight bash %} +codefresh patch helm-repo mycfrepo -public +{% endhighlight %} + +For more info, see the relevant section in the [Codefresh CLI documentation](https://codefresh-io.github.io/cli/helm-repos/update-helm-repo/){:target="\_blank"}. + +## Working with Helm CLI + +The private Helm repository offered by Codefresh is a standard Helm repo and will work with the vanilla Helm executable even outside of the Codefresh UI. +We suggest using the private [Helm repo from Codefresh pipelines]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/), but you can also use it from your workstation. + +### Add a Public repo to Helm + +If your repo is set to `public` access mode, you can use it just like any other HTTP Helm repository. +You can: + +{% highlight bash %} +helm repo add mycfrepo https://h.cfcr.io// +{% endhighlight %} + +### Add a Private repo to Helm + +If your repo is set to `private` access mode, the default, then the Helm client needs to authenticate with Codefresh. +To authenticate, you can use ChartMuseum's 'Helm Push' CLI plugin which adds support for authentication and chart manipulation on top of the basic Helm CLI functionality. + +We highly recommend that you familiarize yourself with the [Helm Push plugin](https://github.com/chartmuseum/helm-push){:target="\_blank"}. 
+ +#### Install the Helm Push plugin + +{% highlight bash %} +helm plugin install https://github.com/chartmuseum/helm-push +{% endhighlight %} + +#### Configure the Helm Push plugin + +If you have the Codefresh CLI installed and configured, there's nothing you need to do. The Helm Push plugin picks up your settings automatically. +To learn about getting started with Codefresh CLI, see [CLI getting started](https://codefresh-io.github.io/cli/getting-started/). +To learn about manual authentication without depending on the Codefresh CLI, see [here](https://github.com/chartmuseum/helm-push#token). + +#### Add the private repo + +{% highlight bash %} +helm repo add mycfrepo cm://h.cfcr.io/kostis-codefresh/default +{% endhighlight %} + +Notice the protocol is `cm://` instead of `https://`. This indicates the custom authentication scheme supported by ChartMuseum Helm Push plugin. + +## Using in a Codefresh pipeline + +The Codefresh Helm plugin automatically handles authentication for managed repositories. You can use the plugin as you usually would. For more information, see the [Codefresh Helm plugin]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + +## Removing a Helm chart from a private Codefresh repository + +You can delete a Helm chart from your own Helm repository with the following HTTP call. + +{% highlight bash %} +curl -X DELETE -v -H "Authorization: Bearer " https://h.cfcr.io/api///charts// +{% endhighlight %} + +Replace values in `<>` with your own (also removing `<>` in the process). + +Generate an api key from [https://g.codefresh.io/user/settings](https://g.codefresh.io/user/settings) as explained in the [API page]({{site.baseurl}}/docs/integrations/codefresh-api/). + +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Helm integration]({{site.baseurl}}/docs/integrations/helm/) +[Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) diff --git a/_docs/deployments/helm/using-helm-in-codefresh-pipeline.md b/_docs/deployments/helm/using-helm-in-codefresh-pipeline.md new file mode 100644 index 000000000..f8e0b33bc --- /dev/null +++ b/_docs/deployments/helm/using-helm-in-codefresh-pipeline.md @@ -0,0 +1,346 @@ +--- +title: "Using Helm in a Codefresh pipeline" +description: "Deploy and push Helm charts with Codefresh" +group: deployments +sub_group: helm +redirect_from: + - /docs/deployments/helm/create-helm-artifacts-using-codefresh-pipeline/ + - /docs/install-helm-chart-using-codefresh-pipeline/ +toc: true +--- + +We created a [special Helm step](https://codefresh.io/steps/step/helm){:target="\_blank"} for easy integration of Helm in Codefresh pipelines. The Helm step facilitates authentication, configuration, and execution of Helm commands. + +> If you have a special use case that is not covered by the Codefresh Helm step, you can always use the regular `helm` cli in a freestyle step. + In this case, you can use the simpler container `codefresh/kube-helm` which includes only Kubectl and helm tools. `kube-helm` is available on DockerHub: [https://hub.docker.com/r/codefresh/kube-helm/](https://hub.docker.com/r/codefresh/kube-helm/){:target="\_blank"}. + +If you are just starting with Helm, refer to our [Helm quick start guide]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) . And, if you prefer to work directly with code, see our [full Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/). 
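+
+For the freestyle alternative mentioned above, a minimal sketch might look like the following. The context name is a placeholder, and exporting `KUBECONFIG` from `CF_KUBECONFIG_PATH` (the kubeconfig Codefresh prepares for connected clusters) is shown only to be explicit:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  my_raw_helm_commands:
+    title: Running raw helm commands
+    image: codefresh/kube-helm   # plain kubectl + helm image mentioned above
+    commands:
+      - export KUBECONFIG="${CF_KUBECONFIG_PATH}"
+      - kubectl config use-context my-kubernetes-context   # placeholder cluster name as configured in Codefresh
+      - helm version
+      - helm list
+{% endraw %}
+{% endhighlight %}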
+ +## Helm setup + + + +To use Helm in your Codefresh pipeline you must do the following: + +1. Make sure that your application has a [Helm chart](https://helm.sh/docs/chart_template_guide/getting_started/) +1. Create a Helm package for your application from the chart +1. [Add a Kubernetes cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/) in Codefresh +1. Define a Helm repository or use the [one offered by Codefresh to all accounts]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +1. Import the Helm [configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) into your pipeline variables +1. Use the Helm step in your [yml build definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) + +Let's see these steps in order. + +### Step 1: Create a Helm chart for your application + +Helm applications are bundled in special archives called *Charts*. You can create a Helm +chart for your application by following [the official documentation on charts](https://helm.sh/docs/chart_template_guide/getting_started/){:target="\_blank"}. + +The example Codefresh application includes a [sample chart](https://github.com/codefresh-contrib/python-flask-sample-app/tree/with-helm/charts/python){:target="\_blank"}, used in our Helm quick start guide, mentioned earlier in this article. + +You can create the chart manually or by using the [helm create](https://helm.sh/docs/helm/#helm-create){:target="\_blank"} command on your workstation. There are also several third-party tools that can create Helm packages for you such as [Draft](https://draft.sh/){:target="\_blank"}. + +Once your Helm chart is ready, commit it to a folder called `charts`, in the same Git repository that contains the source code of your application. Codefresh can also work with Helm charts that are in different Git repositories. We suggest however that you keep both the source code and the Helm chart of an application in the same Git repository to make chart management much easier. + + +### Step 2: Select Kubernetes cluster for deployment + +The Helm pipeline step requires the configuration of a `kube_context` variable that determines the Kubernetes cluster used for the deployment. + +1. Connect your Kubernetes cluster with Codefresh, as described [here]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/). + +1. Provide the cluster to the Helm step by adding the `KUBE_CONTEXT` variable, where the value is the connection *name* entered when you created the connection. +> The connection name also appears as the title of the cluster in Kubernetes integration settings (Account Settings >Integrations > Kubernetes). + +{% include image.html +lightbox="true" +file="/images/deployments/helm/k8s-name.png" +url="/images/deployments/helm/k8s-name.png" +alt="Name of Kubernetes cluster" +caption="Name of Kubernetes cluster" +max-width="70%" +%} + +1. Verify that your cluster is set up for Helm, from the sidebar, below DevOps Insights, select **Helm Releases**. + The [Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) in your cluster are displayed. If you have just started using Helm, the release page may be empty. + +### Step 3: Define a Helm repository + +To push your chart to a Helm repository, configure the target repository to work with. +Always a good practice to save Helm charts in Helm repositories, Codefresh supports a variety of private, authenticated Helm repositories +in addition to public HTTP repositories. 
Codefresh also provides a free, managed Helm repository for every account. + +* Either [connect your repository with Codefresh]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) +OR +* Obtain your [managed Helm repository URL]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/#chart-repository-url) + + +### Step 4: (Optional) Import the Helm configuration into your pipeline definition + +Once you have a connected to a Helm repository, attach it to the pipeline. + +1. Frpm the Pipelines page, select the pipeline into which to import the Helm configuation. +1. In the Workflows tab, do one of the following: + * Click **Variables** on the right, and then click the Settings (gear) icon in the variables section on the right. + * Click the context menu next to the settings icon. +1. Click on **Import from/Add shared configuration**, and from the list, select `CF_HELM_DEFAULT`. See [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + +{% include image.html +lightbox="true" +file="/images/deployments/helm/import-helm-configuration.png" +url="/images/deployments/helm/import-helm-configuration.png" +alt="Connecting a Helm repository in the pipeline" +caption="Connecting a Helm repository in the pipeline" +max-width="50%" +%} + + +### Step 5: Use the Helm freestyle step in the pipeline + +You can now use the Helm freestyle step in the `codefresh.yml` file. This step is only needed in pipelines that actually upload/fetch Helm charts to/from Helm repositories. If your pipeline directly installs a Helm chart from the Git filesystem, there is no need to import a Helm configuration. + +>Currently, you can use only one Helm configuration in the same pipeline. We are aware +of this limitation and will soon improve the way Codefresh works with multiple Helm configurations. + + + +* Use the Helm typed step from the [Step Marketplace](https://codefresh.io/steps/step/helm){:target="\_blank"}. +* Configure the Helm step using environment variables, as described [here]({{site.baseurl}}/docs/codefresh-yaml/variables/#user-provided-variables). + +The example below illustrates how to provide variables as part of the Helm step definition: + +```yaml +deploy: + type: helm + arguments: + action: install + chart_name: test_chart + release_name: first + helm_version: 3.0.3 + kube_context: my-kubernetes-context + custom_values: + - 'pat.arr="{one,two,three}"' + - 'STR_WITH_COMAS="one\,two\,three"' +``` + + + +#### Helm step action modes + +The Helm step can operate in one of three modes, as defined by the `action` field: + +1. `install`: Installs the chart into a Kubernetes cluster. This is the default mode if not explicitly set. +2. `push`: Packages the chart and pushes it to the repository. +3. `auth`: Authenticate only. Only sets up authentication and adds the repo to the Helm. This mode is useful to write your own Helm commands using the freestyle step's `commands` property, but still allow the step to handle authentication. + + +#### Helm values + +* To supply a value file, add to the Helm step, `custom_values_file`, with the value pointing to an existing values file. +* To override specific values, add to the Helm step, `custom_values` followed by the path to the value to set. For example, `myservice_imageTag`. Note that `.` (dot) should be replaced with `_` (underscore). The value of the variable is used to override or set the templated property. + +Examples: +```yaml +... + custom_values: + - 'myimage_pullPolicy=Always' +... 
+``` +results in: +`--set myimage.pullPolicy=Always` + +```yaml +... + custom_value_files: + - 'values-prod.yaml' +... +``` +results in: +`--values values-prod.yaml` + +If a variable already contains a `_` (underscore) in its name, replace it with `__` (double underscore). + +## Helm usage examples + +The following sections illustrate all three modes of Helm usage. + +You can also look at the [GitHub repository](https://github.com/codefresh-contrib/helm-sample-app){:target="\_blank"} of [our Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/) for full pipelines: + +* Pipeline YAML for [deploying a chart](https://github.com/codefresh-contrib/helm-sample-app/blob/master/codefresh-do-not-store.yml){:target="\_blank"} +* Pipeline YAML for [both storing and deploying a chart](https://github.com/codefresh-contrib/helm-sample-app/blob/master/codefresh.yml){:target="\_blank"} + +### Helm usage example: Installing a Helm Chart + +The following example includes the minimum configuration to install a Helm chart from a repository. For more configuration options, see the [Arguments reference](https://codefresh.io/steps/step/helm){:target="\_blank"}. + +```yaml +deploy: + type: helm + arguments: + action: install + chart_name: path/to/charts + release_name: first + helm_version: 3.0.3 + kube_context: my-kubernetes-context +``` + +### Helm usage example: Pushing a Helm Chart + +The following example illustrates how to package and push a Helm chart into a repository. + +```yaml +deploy: + type: helm + arguments: + action: push + chart_name: /codefresh/volume/repo/chart + chart_repo_url: 'cm://h.cfcr.io/useraccount/default' +``` + +> **Notes**: + - Assumes that a Git repository with the Helm chart files was cloned as a part of the pipeline. + - The Git repository contains the chart files in the `chart` directory. + - `chart_repo_url` is optional. If a [Helm repository configuration](#step-4-optional-import-the-helm-configuration-in-your-pipeline-definition) is attached to the pipeline, this setting is ignored. + +### Helm usage example: Authenticating only + +The following example illustrates the Helm mode for authentication only. + +```yaml +deploy: + type: helm + arguments: + action: auth + kube_context: my-kubernetes-context + commands: + - helm list +``` + +### Helm usage example: Custom Helm commands + +The following example illustrates executing custom Helm commands. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +my_custom_helm_command: + type: helm + arguments: + action: auth + kube_context: my-kubernetes-context + commands: + - source /opt/bin/release_chart + - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ + - helm repo add stable https://kubernetes-charts.storage.googleapis.com + - helm repo list + - helm repo update + - helm list +{% endraw %} +{% endhighlight %} + +> Notes: +- The directory that contains a chart MUST have the same name as the chart. Thus, a chart named `my-chart` MUST be created in a directory called `my-chart/`. This is a requirement of the [Helm Chart format](https://helm.sh/docs/chart_template_guide/). + +## Helm configuration fields + +Name|Required|Description +---|---|--- +action|Defaults to 'install'|Operation mode: `install`/`push`/`auth` +chart_name|required for install/push|Chart reference to use, adhering to Helm's lookup rules (path to chart folder, or name of packaged chart). There's no need to prefix with `/reponame` if referencing a chart in a repository, this is handled automatically. 
a.k.a `CHART_NAME` but `CHART_NAME` shouldn't be used anymore. +chart_repo_url|optional|Helm chart repository URL. If a [Helm repository configuration](#step-4-optional---import-the-helm-configuration-in-your-pipeline-definition) is attached to the pipeline, this setting is ignored. +chart_version|optional|Override or set the chart version. +cmd_ps|optional|When defined, Command Postscript is appended as is to the generated Helm command string. Can be used to set additional parameters supported by the command but not exposed as configuration options.| +commands|optional|Commands to execute in plugin after `auth` action. +custom_value_files|optional|Values file to provide to Helm as `--values` or `-f`.| +custom_values|optional|Values to provide to Helm as `--set` +helm_version|optional|Version of [cfstep-helm image](https://hub.docker.com/r/codefresh/cfstep-helm/tags){:target="\_blank"} +kube_context|required for install|Kubernetes context to use. The name of the cluster as [configured in Codefresh]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/). +namespace|optional|Target Kubernetes namespace to deploy to. +release_name|required for install|Helm release name. If the release exists, it is upgraded. +repos|optional|Array of custom repositories. + + +## Full Helm pipeline example + +The pipeline in this example builds a docker image, runs unit tests, stores the Helm chart in the Codefresh private Helm repository and finally deploys the Helm chart to a cluster. + +{% include image.html +lightbox="true" +file="/images/deployments/helm/full-helm-pipeline.png" +url="/images/deployments/helm/full-helm-pipeline.png" +alt="Helm pipeline" +caption="Helm pipeline" +max-width="90%" +%} + +This is the pipeline definition: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - build + - test +steps: + clone: + title: Cloning main repository... + stage: checkout + type: git-clone + arguments: + repo: 'codefresh-contrib/python-flask-sample-app' + revision: with-helm + git: github + MyAppDockerImage: + title: Building Docker Image + stage: build + type: build + working_directory: '${{clone}}' + arguments: + image_name: kostis-codefresh/python-flask-sample-app + tag: 'master' + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + stage: test + type: freestyle + working_directory: '${{clone}}' + arguments: + image: ${{MyAppDockerImage}} + commands: + - python setup.py test + StoreChart: + title: Storing Helm Chart + type: helm + stage: store + working_directory: ./python-flask-sample-app + arguments: + action: push + chart_name: charts/python + kube_context: kostis-demo@FirstKubernetes + DeployMyChart: + type: helm + stage: deploy + working_directory: ./python-flask-sample-app + arguments: + action: install + chart_name: charts/python + release_name: my-python-chart + helm_version: 3.0.2 + kube_context: kostis-demo@FirstKubernetes + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'image_tag=master' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} + +You can see the source code in our [example section]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/). 
+ + +## Related articles +[Helm Charts and repositories]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) +[Using managed Helm repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +[Helm Promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) diff --git a/_docs/deployments/kubernetes/custom-kubectl-commands.md b/_docs/deployments/kubernetes/custom-kubectl-commands.md new file mode 100644 index 000000000..77bc7411d --- /dev/null +++ b/_docs/deployments/kubernetes/custom-kubectl-commands.md @@ -0,0 +1,184 @@ +--- +title: "Custom kubectl commands" +description: "Use kubectl in your Codefresh pipelines" +group: deployments +sub_group: kubernetes +toc: true +--- + +As explained in [Kubernetes deployment options]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/), Codefresh has built-in functionality for deploying to Kubernetes clusters. + +For maximum flexibility with cluster deployments, you can run your own custom `kubectl` commands in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +[Kubectl](https://kubernetes.io/docs/reference/kubectl/overview/){:target="\_blank"} is the command line interface for managing kubernetes clusters. + +Codefresh automatically sets up your [config context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/){:target="\_blank"} with your connected clusters. + +The config context is automatically placed for you at the path of the [variable]({{site.baseurl}}/docs/pipelines/variables/) `$CF_KUBECONFIG_PATH`. +In the current Codefresh implementation, this expands to `/codefresh/volume/sensitive/.kube/config`, within the [shared step volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). + +When you use custom `kubectl` commands, it is your responsibility to template your manifests using any of the available options. To employ Codefresh for templating, it is better to use the dedicated [cf-deploy-kubernetes step]({{site.baseurl}}/docs/deployments/ci-cd-guides/kubernetes-templating/), which provides simple templating capabilities. + +## Using the Codefresh kubectl image + +Codefresh already offers a public Docker image with `kubectl` at [https://hub.docker.com/r/codefresh/kubectl/tags](https://hub.docker.com/r/codefresh/kubectl/tags){:target="\_blank"}. You can choose a specific version of `kubectl` with the appropriate tag or just select `latest` for the most up-to-date version. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl:1.13.3 + commands: + - echo $CF_KUBECONFIG_PATH + - kubectl help +{% endraw %} +{% endhighlight %} + +If you run the pipeline, you can see the help options for `kubectl`. + +## Getting a config context + +The important thing to know when running custom `kubectl` commands is that Codefresh automatically sets up +your [kubeconfig files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/){:target="\_blank"} for you with the cluster information present in [integrations]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster). 
+ +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/kube-context.png" +url="/images/deployments/kubernetes/kube-context.png" +alt="Codefresh cluster names" +caption="Codefresh cluster names" +max-width="50%" +%} + +If you run this pipeline, you will see the names of all your connected clusters: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl + commands: + - kubectl config get-contexts +{% endraw %} +{% endhighlight %} + +With two sample clusters, the output of this pipeline is the following: + +``` +Running freestyle step: Running Kubectl +Pulling image codefresh/kubectl:latest +Status: Image is up to date for codefresh/kubectl:latest +NAME CLUSTER AUTHINFO NAMESPACE +gke-kostisdemo-codefresh-kostis gke-kostisdemo-codefresh-kostis gke-kostisdemo-codefresh-kostis default +kostis-demo@FirstKubernetes kostis-demo@FirstKubernetes kostis-demo@FirstKubernetes default + +``` + +You can modify the current config context and run any `kubectl` command you want applied to that context. The next pipeline will print all the nodes of the first cluster: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl + commands: + - kubectl config get-contexts + - kubectl config use-context "gke-kostisdemo-codefresh-kostis" + - kubectl get nodes +{% endraw %} +{% endhighlight %} + +## Example of parallel deployment with kubectl + +Let's see a full example. In this pipeline, we will create two Docker images and deploy them on two separate clusters, using custom `kubectl` commands. We will also use the [parallel capability]({{site.baseurl}}/docs/pipelines/advanced-workflows/) of Codefresh pipelines. + +Here is the pipeline: + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/parallel-kubectl.png" +url="/images/deployments/kubernetes/parallel-kubectl.png" +alt="Parallel kubectl deployment" +caption="Parallel kubectl deployment" +max-width="100%" +%} + +And here is the complete `codefresh.yml`: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' + +stages: +- build +- deploy + +steps: + BuildingApps: + type: parallel + stage: 'build' + steps: + BuildingApp1: + title: Building App 1 + type: build + stage: build + image_name: nestjs-app + working_directory: ./my-nestjs-project/ + dockerfile: Dockerfile + BuildingApp2: + title: Building App 2 + type: build + stage: build + image_name: rails + working_directory: ./my-rails-project/ + dockerfile: Dockerfile + DeployingApps: + type: parallel + stage: 'deploy' + steps: + DeployApp1: + title: Deploying App 1 + stage: deploy + image: codefresh/kubectl + working_directory: ./my-nestjs-project/ + commands: + - kubectl config get-contexts + - kubectl config use-context "gke-kostisdemo-codefresh-kostis" + - kubectl apply -f service.yml deployment.yml + DeployApp2: + title: Deploying App 2 + stage: deploy + image: codefresh/kubectl + working_directory: ./my-rails-project/ + commands: + - kubectl config get-contexts + - kubectl config use-context "kostis-demo@FirstKubernetes" + - kubectl apply -f service.yml deployment.yml configmap.yml +{% endraw %} +{% endhighlight %} + +In the example above, we select one of the clusters in each deployment step, and then apply several Kubernetes manifests that constitute an application. 
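+
+One detail worth keeping in mind for steps like the ones above: when applying several manifest files in a single command, `kubectl` expects each file to be passed with its own `-f` flag, or you can point `-f` at a directory that contains all of them:
+
+{% highlight shell %}
+{% raw %}
+# each manifest gets its own -f flag
+kubectl apply -f service.yml -f deployment.yml -f configmap.yml
+
+# or apply everything inside a directory of manifests
+kubectl apply -f ./manifests/
+{% endraw %}
+{% endhighlight %}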
+ +## Related articles +[Managing your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Accessing a Docker registry]({{site.baseurl}}/docs/ci-cd-guides/access-docker-registry-from-kubernetes/) + + + + + + + + + + + \ No newline at end of file diff --git a/_docs/deployments/kubernetes/deployment-options-to-kubernetes.md b/_docs/deployments/kubernetes/deployment-options-to-kubernetes.md new file mode 100644 index 000000000..3442e983f --- /dev/null +++ b/_docs/deployments/kubernetes/deployment-options-to-kubernetes.md @@ -0,0 +1,141 @@ +--- +title: "Deployment options for Kubernetes" +description: "Deploy to Kubernetes with the declarative deploy step" +group: deployments +sub_group: kubernetes +redirect_from: + - /docs/deploy-to-kubernetes/ + - /docs/deployment-to-kubernetes-quick-start-guide/ + - /docs/deploy-to-kubernetes/deployment-to-kubernetes-quick-start-guide/ + - /docs/deploy-to-kubernetes/get-ready-to-deploy/ +toc: true +--- + +Codefresh offers several options when it comes to Kubernetes deployments: + +1. Codefresh UI for on demand deployments + This is the easiest deployment option for Kubernetes. See our [Kubernetes quick start guide]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/). +1. Through a dedicated [deploy step]({{site.baseurl}}/docs/pipelines/steps/deploy/) in a pipeline + Described in this article. +1. Through the [cf-deploy-kubernetes step]({{site.baseurl}}/docs/ci-cd-guides/kubernetes-templating/) in a pipeline + Use this to also perform simple templating on Kubernetes manifests. +1. Through a [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) step with [Kustomize](https://kustomize.io){:target="\_blank"}. + See [Deployment with Kustomize]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-with-kustomize). +1. Using a [freestyle]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) step with your own `kubectl` commands + This deployment option gives you great flexibility, but assumes that you know how to work with `kubectl`. See [Custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/). +1. Using Helm as a package manager + See our [Helm quick start guide]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) . + +## Prerequisites + +* A K8s cluster in Codefresh (see [Connecting a Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) +* Familiarity with the [Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/), basic [pipeline steps ]({{site.baseurl}}/docs/pipelines/steps/), and how to describe them +* [Integrate your Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/) with Codefresh + +## Build and push your image +Here is a basic Codefresh pipeline scenario to build and push your image to Dockerhub registry. + + `YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildImage: + type: build + image_name: '/' #specify your future image reference here + dockerfile: Dockerfile + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + + PushToDockerRegistry: + type: push + candidate: '${{BuildImage}}' + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + registry: 'dockerhub' #the name of the registry you added to Codefresh +{% endraw %} +{% endhighlight %} + +Using this YAML example, we'll add an additional step to deploy the image in Dockerhub to Kubernetes. 
+ +## Describe your deployment +The following instructions describe how to create a new service in your Kubernetes cluster in order to deploy to it. +>If you're deploying to an existing service in your Kubernetes cluster, please skip to the [next step]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/#add-a-deployment-step). + + + 1. Go to the **`Kubernetes` → `Services page`**. + 1. Click the button **“Add Service”**. + 1. Select the **cluster**. + 1. Select the **namespace**. + 1. Type an arbitrary **service name**. + 1. Specify the **number of replicas**. + 1. Type the name of your **pushed image**. + 1. In the **“Internal Ports”** field specify the port which your application listens to. + 1. In the **“Expose port”** field specify the port to be exposed to the Internet and check the checkbox. + 1. Click the button **“Deploy”** to deploy the application. + +Wait until the deployment is completed, and you can open the deployed application in your browser by clicking on the "endpoint" link. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/describe-k8s-deployment.png" +url="/images/deployments/kubernetes/describe-k8s-deployment.png" +alt="Describe Kubernetes deployment" +caption="Describe Kubernetes deployment" +max-width="60%" +%} + +## Add a Deployment step +So now you have deployed your image manually, which is great. But how to trigger the deployment within your pipeline? For that you will need to add a step of a “Deploy” type to the Codefresh YAML manifest file: + + `YAML` +{% highlight yaml %} +{% raw %} +RunningDeployScript: + title: Running Deploy Script + type: deploy + kind: kubernetes + cluster: '' #the name specified when you added the cluster + namespace: #the namespace you wish to deploy into + service: #the service you would like to update the deployment in + candidate: + image: '${{BuildImage}}' + registry: 'dockerhub' +{% endraw %} +{% endhighlight %} + +The full Codefresh YAML looks like this: + + `YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildImage: + type: build + image_name: '/' + dockerfile: Dockerfile + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + + PushToDockerRegistry: + type: push + candidate: '${{BuildImage}}' + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + registry: 'dockerhub' #the name of the registry you added to Codefresh + + RunningDeployScript: + title: Running Deploy Script + type: deploy + kind: kubernetes + cluster: '' #the name specified when you added the cluster + namespace: #the namespace you wish to deploy into + service: #the service you would like to update the deployment in + candidate: + image: '${{BuildImage}}' + registry: 'dockerhub' +{% endraw %} +{% endhighlight %} + +You can now run the whole pipeline that builds your application from source to a docker image, pushes it to a docker registry and deploys it to your Kubernetes cluster. 
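+If you also want the pipeline to verify the rollout before finishing, you can append an optional freestyle step that waits for the deployment to become ready. This is only a sketch; the context, namespace, and deployment names are placeholders that must match your own cluster and service:
+
+`YAML`
+{% highlight yaml %}
+{% raw %}
+  VerifyDeployment:
+    title: Verifying Deployment
+    image: codefresh/kubectl
+    commands:
+      - kubectl config use-context "my-cluster"
+      # Wait until the new pods are rolled out (fails the build on timeout)
+      - kubectl rollout status deployment/my-service -n my-namespace --timeout=120s
+{% endraw %}
+{% endhighlight %}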
+ +## Related articles +[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) diff --git a/_docs/deployments/kubernetes/manage-kubernetes.md b/_docs/deployments/kubernetes/manage-kubernetes.md new file mode 100644 index 000000000..0b9081093 --- /dev/null +++ b/_docs/deployments/kubernetes/manage-kubernetes.md @@ -0,0 +1,169 @@ +--- +title: "Managing Kubernetes clusters" +description: "Use the graphical Kubernetes dashboard in Codefresh" +group: deployments +sub_group: kubernetes +redirect_from: + - /docs/deploy-to-kubernetes/codefresh-kubernetes-integration-beta/ + - /docs/codefresh-kubernetes-integration-beta/ +toc: true +--- + +Codefresh includes a built-in Kubernetes Dashboard that allows you to see the state of your clusters, and even make changes if you have the appropriate access privileges. + +## Accessing the Kubernetes Dashboard + +After [adding a cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster), you will be able to manage your Kubernetes assets via the *Kubernetes tab* on the left pane. Clicking on the Kubernetes icon will take you to your services dashboard. + +{% include image.html +lightbox="true" +file="/images/integrations/kubernetes/kubernetes-dashboard.png" +url="/images/integrations/kubernetes/kubernetes-dashboard.png" +alt="Codefresh Kubernetes Dashboard" +caption="Codefresh Kubernetes Dashboard" +max-width="80%" + %} + +With the graphical dashboard it is very easy to locate problematic services or deploy new ones quickly. If there are clusters that are not accessible to your user you can hide them by enabling the *Hide inaccessible clusters* option at the top right of the window in order to simplify the view. + +## Viewing your Kubernetes services + +If you have too many clusters you can choose the *add filter* button at the top of the window to hide specific clusters or namespaces. + +You will be able to see the following parameters for each service: +* Name +* Cluster +* Namespace +* Replica count +* Docker image +* Selector +* A status check + +You can also switch to a Grid view if you prefer that over the default List view: + + +{% include image.html +lightbox="true" +file="/images/kubernetes/dashboard/grid-view.png" +url="/images/kubernetes/dashboard/grid-view.png" +alt="Kubernetes Dashboard grid view" +caption="Kubernetes Dashboard grid view" +max-width="80%" + %} + + If there are clusters that are not accessible to your user you can hide them by enabling the *Hide inaccessible clusters* option at the top right of the window in order to simplify the view. + + +## Work with your services + +In this view, you will be able to perform the following actions: + +* Add new service +* Edit/Update existing services +* Remove service + + +## Deploying a new service + +The Kubernetes dashboard provides a GUI dialog to quickly deploy new services in your cluster. + +### Choose a Docker image + +To add a service, click the "Add Service" button on the top or the "plus" button on a specific namespace. Then fill in the details for your new service. + +You can add images built in Codefresh which were pushed to Codefresh registry or provide a name for Docker image that will be pulled from an [external Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/). Notice that images which are not from Dockerhub must be mentioned with their full domain name. 
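+For example, the following are all valid image references; only the first can be written without a registry domain because it comes from Docker Hub (the registry names below are illustrative):
+
+```
+nginx:1.21                                   # Docker Hub image, no domain needed
+gcr.io/my-project/my-app:1.0.0               # Google Container Registry image
+myregistry.example.com:5000/team/my-app:2.3  # private registry image
+```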
+ +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/quick-ui-deploy.png" +url="/images/deployments/kubernetes/quick-ui-deploy.png" +alt="Deploying with the quick UI dialog" +caption="Deploying with the quick UI dialog" +max-width="60%" +%} + + +Use the following steps in order to add Image and pull secrets from the [connected Docker Registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/): +* Specify the image name in the format `//:` +* Provide and image pull secret - this will be done for each namespace + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/deploying-private-cf-registry.png" +url="/images/deployments/kubernetes/deploying-private-cf-registry.png" +alt="Deploying from the private Codefresh registry" +caption="Deploying from the private Codefresh registry" +max-width="60%" +%} + + +From this screen you can also [create Kubernetes image secrets]({{site.baseurl}}/docs/ci-cd-guides/access-docker-registry-from-kubernetes/) without actually deploying anything. + + +### Set environment variables and resources + +You can add extra environment variables that will passed to the deployment image. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/environment-variables-deployment.png" +url="/images/deployments/kubernetes/environment-variables-deployment.png" +alt="Environment variables for the deployment" +caption="Environment variables for the deployment" +max-width="60%" +%} + + + +You can also define resource limits for your pods. +It is a good practice to place maximum limits so that your services do not experience resource starvation. + + +### Adding a service with a manifest file + +If you are an advanced Kubernetes user, toggle the Deployment option button to the `YAML` position on the top right corner of the screen. +In this mode you can define exactly the contents for the service and deployment Kubernetes resources. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/define-k8s-service-resource.png" +url="/images/deployments/kubernetes/define-k8s-service-resource.png" +alt="Define a Kubernetes Service Resource" +caption="Define a Kubernetes Service Resource" +max-width="60%" +%} + +You can type directly in the browser window or paste content from a text editor. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/define-k8s-deployment-resource.png" +url="/images/deployments/kubernetes/define-k8s-deployment-resource.png" +alt="Define a Kubernetes Deployment Resource" +caption="Define a Kubernetes Deployment Resource" +max-width="60%" +%} + + +Congratulations! Your service is now deployed to your Kubernetes cluster. + +You can update an existing service in a similar manner from your Kubernetes services window - Just hit the "edit" icon and update your service using the same steps as in "Add new service" section. + +## Automate your deployment + +After your service is deployed to your Kubernetes cluster, you can automate image deployment using Codefresh pipelines. + +Some of the possible options are: + +1. The dedicated [deploy step]({{site.baseurl}}/docs/pipelines/steps/deploy/) in a pipeline. +1. The [cf-deploy-kubernetes step]({{site.baseurl}}/docs/ci-cd-guides/kubernetes-templating/) in a pipeline. This can also perform simple templating on Kubernetes manifests. + +See more choices in the [Deployment options page]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/). 
+ +## Related articles +[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +[Add Config Maps]({{site.baseurl}}/docs/ci-cd-guides/add-config-maps-to-your-namespaces/) +[Kubernetes deployment quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) + + + diff --git a/_docs/example-catalog/cd-examples/amazon-ecs.md b/_docs/example-catalog/cd-examples/amazon-ecs.md new file mode 100644 index 000000000..89905c22e --- /dev/null +++ b/_docs/example-catalog/cd-examples/amazon-ecs.md @@ -0,0 +1,155 @@ +--- +title: "Amazon ECS/Fargate" +description: "Use Codefresh to deploy Docker containers to ECS/Fargate" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/amazon-ecs/ + - /docs/deploy-your-containers/ + - /docs/deploy-your-containers/amazon-ecs/ +toc: true +--- +Codefresh can deploy to any ECS or Fargate cluster created in Amazon. + +{% include image.html +lightbox="true" +file="/images/examples/amazon-ecs/ecs-pipeline-deployment.png" +url="/images/examples/amazon-ecs/ecs-pipeline-deployment.png" +alt="Deploying to Amazon ECS" +caption="Deploying to Amazon ECS" +max-width="100%" +%} + +## Prerequisites + + +1. Configure an ECS (or Fargate) Cluster with at least one running instance. +1. Configure an ECS Service and Task Definition with a reference to **the image that you are going to build and push.** See [the official amazon docs](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html) for more details. +1. Connect your [ECR to Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/amazon-ec2-container-registry/) so that it can be used by name in Codefresh pipelines. +1. Verify you have AWS Credentials (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`), with the following privileges: + + `JSON` +{% highlight json %} +{% raw %} +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1479146904000", + "Effect": "Allow", + "Action": [ + "ecs:DescribeServices", + "ecs:DescribeTaskDefinition", + "ecs:DescribeTasks", + "ecs:ListClusters", + "ecs:ListServices", + "ecs:ListTasks", + "ecs:RegisterTaskDefinition", + "ecs:UpdateService" + ], + "Resource": [ + "*" + ] + } + ] +} +{% endraw %} +{% endhighlight %} + + + +## Create a CI/CD pipeline for ECS/Fargate + +Here is the complete pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - "clone" + - "build" + - "deploy" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}" + revision: "${{CF_BRANCH}}" + stage: "clone" + git: github + BuildingDockerImage: + stage: "build" + title: Building Docker Image + type: build + image_name: ${{IMAGE}} + tag: '${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile.multistage + Push: + title: "Pushing image to ECR" + stage: "deploy" + type: "push" + tag: '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + registry: "ecr" + candidate: "${{BuildingDockerImage}}" + DeployToFargate: + stage: "deploy" + image: codefreshplugins/cf-deploy-ecs + commands: + - cfecs-update ${{REGION}} ${{ECS_CLUSTER_NAME}} ${{ECS_SERVICE_NAME}} --image-name ${{IMAGE_PREFIX}}/${{IMAGE}} --image-tag '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + environment: + - AWS_ACCESS_KEY_ID=${{AWS_ACCESS_KEY_ID}} + - AWS_SECRET_ACCESS_KEY=${{AWS_SECRET_ACCESS_KEY}} + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. 
Clones the source code with a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) 
+1. Uses a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create a Docker image 
+1. Uses a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) to push the Docker image to ECR. The registry was previously [connected in Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) with the `ecr` identifier. 
+1. Runs `codefreshplugins/cf-deploy-ecs` to perform the actual deployment 
+ 
+ 
+The pipeline needs [environment variables]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) that hold all the required parameters. 
+ 
+{% include image.html 
+lightbox="true" 
+file="/images/examples/amazon-ecs/ecs-variables.png" 
+url="/images/examples/amazon-ecs/ecs-variables.png" 
+alt="ECS environment variables" 
+caption="ECS environment variables" 
+max-width="80%" 
+%} 
+ 
+Note that the **`--image-name`** and **`--image-tag`** pair should comprise the **full name** of the image that was pushed to the registry (including the registry name), so that it can be correctly referenced by the corresponding Task Definition. 
+ 
+ 
+## Deployment Flow 
+ 
+The `codefreshplugins/cf-deploy-ecs` step performs the following: 
+ 
+ 
+1. Gets the ECS service by the specified `aws-region`, `ecs-cluster`, and `service-names`. 
+1. Creates a new revision from the current task definition of the service. If `--image-name` and `--image-tag` are provided, it replaces the image tag. 
+1. Runs the `update-service` command with the new task definition revision. 
+1. Waits for the deployment to complete. 
+    * Deployment is successfully completed if `runningCount == desiredCount` for the PRIMARY deployment - see `aws ecs describe-services` 
+    * The `cfecs-update` command exits with a timeout error if after `--timeout` (default = 900s) `runningCount` does not equal `desiredCount` 
+    * The `cfecs-update` command exits with an error if `--max-failed` (default = 2) or more ECS tasks were stopped with an error for the task definition that you are deploying. ECS continuously retries failed tasks. 
+ 
+You can also find the same step in the form of a [Codefresh plugin](https://codefresh.io/steps/step/ecs-deploy). 
+ 
+## Related articles 
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) 
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) 
+[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) 
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) 
+[External Registries]({{site.baseurl}}/docs/integrations/docker-registries/) 
+ 
+ 
diff --git a/_docs/example-catalog/cd-examples/deploy-to-heroku.md b/_docs/example-catalog/cd-examples/deploy-to-heroku.md 
new file mode 100644 
index 000000000..100a56eca 
--- /dev/null 
+++ b/_docs/example-catalog/cd-examples/deploy-to-heroku.md 
@@ -0,0 +1,212 @@ 
+--- 
+title: "Deploy to Heroku" 
+description: "Deploy your application or image to Heroku" 
+group: example-catalog 
+sub_group: cd-examples 
+toc: true 
+--- 
+ 
+Heroku is a container-based cloud PaaS (Platform as a Service) that allows you to deploy, run, and manage your applications. Built on top of AWS, it supports Ruby, Node.js, Java, Python, Clojure, Scala, Go, and PHP. 
+ 
+This tutorial covers two examples, depending on your use case. If you are not using containers, use the Codefresh heroku-deployer plugin ([Example #1](#pipeline-example-1-deploying-source-code-to-heroku-using-the-codefresh-heroku-plugin)). 
If you are using containers, you can achieve deployment by using a combination of build, push, and freestyle steps ([Example #2](#pipeline-example-2-deploy-a-docker-image-to-heroku)). + +## Example Django Application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/heroku-python-django-sample-app). + +The repository contains a Django starter project with the following commands: + +- `pip install -r requirements.txt` to install dependencies. +- `python -m unittest composeexample.utils` runs unit tests. +- `python manage.py runserver 0.0.0.0:8000` to start the application locally. + +Once launched the application presents the Django starter page at localhost:8000. + +## Pipeline Example #1: Deploying Source Code to Heroku Using the Codefresh Heroku Plugin + +### Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/#create-a-codefresh-account/) +- A [free Heroku account](https://signup.heroku.com){:target="\_blank"} +- A Heroku API token (you can find this under **Account Settings** and then scrolling down, you will find the API Key) + +### Create the pipeline + +This pipeline has three stages: clone, test, and deploy. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-deployer-pipeline.png" +url="/images/examples/deployments/heroku-deployer-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. It will automatically clone the project for you. + +Note that you need to change the environment variables in the deploy stage to your respective values. You can do this directly [in the YAML itself]({{site.baseurl}}/docs/how-to-guides/migrating-from-travis-ci/#environment-variables), or through the Codefresh UI. Navigate to the in-line editor, and to the right you will find a tab lebeled **Variables**. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-deployer-variables2.png" +url="/images/examples/deployments/heroku-deployer-variables2.png" +alt="Codefresh UI Pipeline Variables View" +caption="Codefresh UI Pipeline Variables View" +max-width="100%" +%} + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - clone + - test + - deploy +steps: + clone: + title: "Cloning main repository..." + stage: "clone" + type: "git-clone" + arguments: + repo: "codefresh-contrib/heroku-python-django-sample-app" + revision: "master" + git: "github" + run_unit_tests: + title: "Running unit tests..." + stage: "test" + type: "freestyle" + working_directory: "${{clone}}" + arguments: + image: "python:3.6-slim" + commands: + - "pip install -r requirements.txt --cache-dir=/codefresh/volume/pip-cache" + - "python -m unittest composeexample.utils" + deploy_to_heroku: + title: "Deploying to Heroku..." + stage: "deploy" + type: "heroku-deployer" + arguments: + APP_NAME: $APP_NAME + EMAIL: $EMAIL + API_TOKEN: $API_TOKEN +{% endraw %} +{% endhighlight %} + +The above pipeline has the following steps: + +1. A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step that clones the main repository +2. A [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that installs dependencies and runs the unit tests +3. 
A freestyle step that deploys the application to Heroku using the heroku-deployer plugin from the [Step Marketplace](https://codefresh.io/steps/step/heroku-deployer) + +## Pipeline Example #2: Deploy a Docker Image to Heroku + +This example differs from the plugin usage, as it deploys a built Docker image to Heroku. + +Note that you need to change the environment variables to your respective values. You can do this directly [in the YAML itself]({{site.baseurl}}/docs/how-to-guides/migrating-from-travis-ci/#environment-variables), or through the Codefresh UI. Navigate to the in-line editor, and to the right you will find a tab lebeled **Variables**. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-deployer-variables.png" +url="/images/examples/deployments/heroku-deployer-variables.png" +alt="Codefresh UI Pipeline Variables View" +caption="Codefresh UI Pipeline Variables View" +max-width="100%" +%} + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/#create-a-codefresh-account/) +- A [free Heroku account](https://signup.heroku.com){:target="\_blank"} +- An empty repository already created in Heroku using the `heroku create ` command +- A Heroku registry [connected to Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/other-registries/#heroku-registries) +- A Heroku API token (you can find this under **Account Settings** and then scrolling down, you will find the API Key) + +### Create the pipeline + +This pipeline has five stages: clone, build, test, push, and release. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-vanilla-push-pipeline.png" +url="/images/examples/deployments/heroku-vanilla-push-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. It will automatically clone the project for you. + +`codefresh-heroku-push-image.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +version: '1.0' +stages: + - clone + - build + - test + - push + - release +steps: + clone: + title: "Cloning main repository..." + stage: "clone" + type: "git-clone" + arguments: + repo: "codefresh-contrib/heroku-python-django-sample-app" + revision: "master" + git: "github" + build: + title: "Building Docker Image..." + stage: "build" + type: "build" + working_directory: "./heroku-python-django-sample-app" + arguments: + image_name: "${{IMAGE_NAME}}" + tag: "master" + dockerfile: "Dockerfile" + run_unit_tests: + title: "Running unit tests..." + stage: "test" + type: "freestyle" + working_directory: "./heroku-python-django-sample-app" + arguments: + image: '${{build}}' + commands: + - "python -m unittest composeexample.utils" + push_image: + title: "Pushing image to Heroku..." + stage: "push" + type: "push" + arguments: + candidate: '${{build}}' + image_name: "${{IMAGE_NAME}}/web" + registry: "heroku" + release_image: + title: "Releasing image..." + stage: "release" + type: "freestyle" + arguments: + image: "nazarcodefresh/heroku-cli:alpine" + commands: + - >- + printf "machine api.heroku.com\n login $EMAIL\n password + $API_TOKEN\nmachine git.heroku.com\n login $EMAIL\n password + $API_TOKEN\n" > ~/.netrc + - "heroku container:release --app $IMAGE_NAME web" +{% endraw %} +{% endhighlight %} + +The pipeline does the following: +1. 
Clones the main repository through the [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). 
+1. Builds our Docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). 
+1. Runs unit tests on our Docker image through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
+1. Pushes to the Heroku registry through a [push step]({{site.baseurl}}/docs/pipelines/steps/push/). 
+1. Releases the Docker image through another freestyle step. 
+ 
+ 
+ 
+## Related articles 
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) 
diff --git a/_docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp.md b/_docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp.md 
new file mode 100644 
index 000000000..fa66c1c25 
--- /dev/null 
+++ b/_docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp.md 
@@ -0,0 +1,122 @@ 
+--- 
+title: "Deploy to a VM via SCP" 
+description: "Deploy your application to Tomcat using SCP" 
+group: example-catalog 
+sub_group: cd-examples 
+toc: true 
+--- 
+ 
+## Prerequisites 
+ 
+- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/create-codefresh-account) 
+- A distribution of [Tomcat](https://tomcat.apache.org/download-90.cgi){:target="\_blank"} set up on a remote server (running with port 8080 exposed) 
+ 
+## The example Java Application 
+ 
+You can find the example project on [GitHub](https://github.com/codefresh-contrib/scp-war-app){:target="\_blank"}. 
+ 
+The example application is a simple Hello World Java application using the [Spark Java framework](http://sparkjava.com/){:target="\_blank"}: 
+ 
+{% include image.html 
+lightbox="true" 
+file="/images/examples/deployments/scp-hello-world.png" 
+url="/images/examples/deployments/scp-hello-world.png" 
+alt="Hello World!" 
+caption="Hello World!" 
+max-width="100%" 
+%} 
+ 
+ 
+```java 
+    @Override 
+    public void init() { 
+        get("/hello", (req, res) -> "Hello World"); 
+    } 
+``` 
+ 
+## Create the pipeline 
+ 
+Our pipeline has three stages: clone, package, and transfer. 
+ 
+{% include image.html 
+lightbox="true" 
+file="/images/examples/deployments/scp-pipeline.png" 
+url="/images/examples/deployments/scp-pipeline.png" 
+alt="SCP pipeline" 
+caption="Codefresh UI Pipeline View" 
+max-width="100%" 
+%} 
+ 
+You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. It will automatically clone the project for you. 
+ 
+Note that you need to change the environment variables under the `transfer` step to your respective values. 
+ 
+`codefresh.yml` 
+{% highlight yaml %} 
+{% raw %} 
+# More examples of Codefresh YAML can be found at 
+# https://codefresh.io/docs/docs/example-catalog/ 
+ 
+version: "1.0" 
+# Stages can help you organize your steps in stages 
+stages: 
+  - "clone" 
+  - "package" 
+  - "transfer" 
+ 
+steps: 
+  clone: 
+    title: "Cloning repository..." 
+    type: "git-clone" 
+    stage: "clone" 
+    arguments: 
+      repo: "codefresh-contrib/scp-war-app" 
+ 
+  package: 
+    title: "Packaging war..." 
+    type: "freestyle" 
+    stage: "package" 
+    arguments: 
+      image: "maven:3.5.2-jdk-8-alpine" 
+      working_directory: "${{clone}}" 
+      commands: 
+        - "mvn -Dmaven.repo.local=/codefresh/volume/m2_repository clean package" 
+ 
+  transfer: 
+    title: "Transferring war to Tomcat..." 
+ type: "freestyle" + stage: "transfer" + arguments: + image: "ictu/sshpass:latest" + working_directory: "${{package}}/target" + environment: + - USER= + - HOST= + - PASSWORD= + - TOMCAT_DIR= + commands: + - "echo | ssh-keygen -P '' -t rsa" + - "sshpass -p $PASSWORD ssh-copy-id -i /root/.ssh/id_rsa.pub -o StrictHostKeyChecking=no $USER@$HOST" + - "scp sparkjava-hello-world-1.0.war $USER@$HOST:$TOMCAT_DIR" +{% endraw %} +{% endhighlight %} + +The above pipeline does the following: + +1. Clones the main repository through the [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Installs the dependencies via Maven and packages our `war` file through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Transfers our application via scp to a Tomcat server through another freestyle step. + +Note that you will need to change the listed environment variables accordingly, either through the YAML itself, or through your pipeline settings: + +{% include image.html +lightbox="true" +file="/images/examples/deployments/scp-variables.png" +url="/images/examples/deployments/scp-variables.png" +alt="Pipeline variables" +caption="Pipeline variables" +max-width="100%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/deploy-with-kustomize.md b/_docs/example-catalog/cd-examples/deploy-with-kustomize.md new file mode 100644 index 000000000..21b3089fa --- /dev/null +++ b/_docs/example-catalog/cd-examples/deploy-with-kustomize.md @@ -0,0 +1,244 @@ +--- +title: "Deploy with Kustomize" +description: "Deploy your services to Kubernetes using Kustomize" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +[Kustomize](https://kustomize.io) is a tool included with kubectl 1.14 that "lets you customize raw, template-free YAML files for multiple purposes, leaving the original YAML untouched and usable as is." + +Kustomize is more of an overlay engine, as opposed to a templating engine. You create a base configuration and overlays. Your overlays contain a *kustomization.yaml* file, and any variants/changes are applied over top of the base configuration. Kustomize does not use templates at all. + +While it is good for simple scenarios, we suggest that you use Helm for managing your Kubernetes applications. Helm is a full package manager for Kubernetes manifests that also provides templating capabilities. See [this example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/){:target="\_blank"} for more information. + +## The example application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/kustomize-sample-app){:target="\_blank"}. + +The sample application is a simple Spring Boot web app, that displays an environment variable, `MY_MYSQL_DB` on the page: + +```java +public class HelloController { + + String my_sql_db = System.getenv("MY_MYSQL_DB"); + + @RequestMapping("/") + public String index() { + return my_sql_db; + } +``` + +The project contains a [base](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#base){:target="\_blank"} and two [overlays](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#overlay){:target="\_blank"}, one for a staging environment and one for production. + +The base manifest holds a dummy variable for `MY_MYSQL_DB` which will be overlayed once we call the kustomize command in our pipeline. + +`base/deployment.yaml` +```yaml +... 
+        env: 
+        - name: MY_MYSQL_DB 
+          valueFrom: 
+            configMapKeyRef: 
+              name: the-map 
+              key: mysqlDB 
+``` 
+ 
+We will overlay a different value for `MY_MYSQL_DB` on top of these manifests for the staging and production environments. 
+ 
+`overlays/staging/config-map.yaml` 
+```yaml 
+apiVersion: v1 
+kind: ConfigMap 
+metadata: 
+  name: the-map 
+data: 
+  mysqlDB: "staging-mysql.example.com:3306" 
+``` 
+ 
+`overlays/production/config-map.yaml` 
+```yaml 
+apiVersion: v1 
+kind: ConfigMap 
+metadata: 
+  name: the-map 
+data: 
+  mysqlDB: "prod-mysql.example.com:3306" 
+``` 
+ 
+In addition, for the production environment, the number of replicas is overlaid to 3 instead of 1 (as [defined in the base deployment](https://github.com/codefresh-contrib/kustomize-sample-app/blob/32e683f82940de0bf2de2da40fa6b150e2b24b23/base/deployment.yaml#L8){:target="\_blank"}). 
+ 
+`overlays/production/deployment.yaml` 
+```yaml 
+apiVersion: apps/v1 
+kind: Deployment 
+metadata: 
+  name: the-deployment 
+spec: 
+  replicas: 3 
+``` 
+ 
+## Prerequisites 
+ 
+- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/create-codefresh-account) 
+ 
+- A Kubernetes cluster [connected to your Codefresh account](https://codefresh.io/docs/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) 
+ 
+## Create the staging environment pipeline 
+ 
+This pipeline will have two stages: clone and deploy. 
+ 
+{% include image.html 
+lightbox="true" 
+file="/images/examples/deployments/k8s-kustomize-staging-pipeline.png" 
+url="/images/examples/deployments/k8s-kustomize-staging-pipeline.png" 
+alt="Codefresh UI Pipeline View" 
+caption="Codefresh UI Pipeline View" 
+max-width="100%" 
+%} 
+ 
+You should be able to copy and paste this YAML in the in-line pipeline editor of the Codefresh UI. Make sure to replace the cluster context in the `kubectl` command under the `arguments` section with the name of the cluster you integrated with Codefresh. The pipeline will automatically clone the project for you and deploy it. 
+ 
+`staging-codefresh.yml` 
+{% highlight yaml %} 
+{% raw %} 
+# More examples of Codefresh YAML can be found at 
+# https://codefresh.io/docs/docs/example-catalog/ 
+ 
+version: "1.0" 
+# Stages can help you organize your steps in stages 
+ 
+stages: 
+  - clone 
+  - deploy 
+ 
+steps: 
+  clone: 
+    title: Cloning main repository... 
+    type: git-clone 
+    stage: clone 
+    arguments: 
+      repo: https://github.com/codefresh-contrib/kustomize-sample-app.git 
+      git: github 
+      revision: master 
+ 
+  deploy: 
+    title: Deploying to Staging using Kustomize... 
+    type: freestyle 
+    stage: deploy 
+    working_directory: ${{clone}} 
+    arguments: 
+      image: codefresh/kubectl:1.14.9 
+      commands: 
+        - kubectl config use-context anna-sandbox@codefresh-support 
+        - kubectl apply -k overlays/staging 
+{% endraw %} 
+{% endhighlight %} 
+ 
+The above pipeline does the following: 
+1. Clones the main repository through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). 
+2. Connects to the Kubernetes cluster we integrated with Codefresh using `kubectl`, and deploys the application to the staging environment, with the value for `MY_MYSQL_DB` defined in the staging ConfigMap, using Kustomize (the `-k` flag), through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
+ 
+>If you are using `kubectl` prior to 1.14, you can use the following command to deploy with Kustomize: 
+>`kustomize build overlays/staging | kubectl apply -f -` 
+ 
+## Create the production environment pipeline 
+ 
+Likewise, this pipeline will have two stages: clone and deploy. 
+ 
+{% include image.html 
+lightbox="true" 
+file="/images/examples/deployments/k8s-kustomize-prod-pipeline.png" 
+url="/images/examples/deployments/k8s-kustomize-prod-pipeline.png" 
+alt="Codefresh UI Pipeline View" 
+caption="Codefresh UI Pipeline View" 
+max-width="100%" 
+%} 
+ 
+You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. Again, replace the cluster context in the `kubectl` command with your own. Click **Save and Run**, and it will automatically clone the project for you. 
+ 
+`prod-codefresh.yml` 
+{% highlight yaml %} 
+{% raw %} 
+# More examples of Codefresh YAML can be found at 
+# https://codefresh.io/docs/docs/example-catalog/ 
+ 
+version: "1.0" 
+# Stages can help you organize your steps in stages 
+ 
+stages: 
+  - clone 
+  - deploy 
+ 
+steps: 
+  clone: 
+    title: Cloning main repository... 
+    type: git-clone 
+    stage: clone 
+    arguments: 
+      repo: https://github.com/codefresh-contrib/kustomize-sample-app.git 
+      git: github 
+      revision: master 
+ 
+  deploy: 
+    title: Deploying to Production using Kustomize... 
+    type: freestyle 
+    stage: deploy 
+    working_directory: ${{clone}} 
+    arguments: 
+      image: codefresh/kubectl:1.14.9 
+      commands: 
+        - kubectl config use-context anna-sandbox@codefresh-support 
+        - kubectl apply -k overlays/production 
+{% endraw %} 
+{% endhighlight %} 
+ 
+The above pipeline does the following: 
+ 
+1. Clones the main repository through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). 
+1. Connects to the Kubernetes cluster we integrated with Codefresh using `kubectl`, and deploys the application to the production environment, with the value for `MY_MYSQL_DB` defined in the production ConfigMap, using Kustomize (the `-k` flag), through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
+ 
+ 
+>Note that if you are using `kubectl` prior to 1.14, you can use the following command to deploy with Kustomize: 
+>`kustomize build overlays/production | kubectl apply -f -` 
+ 
+## Verification 
+ 
+After you run these pipelines, your deployments are displayed in the [Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#accessing-the-kubernetes-dashboard). 
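+In addition to the dashboard view below, you can read the deployed ConfigMap back with `kubectl` to see which overlay value is currently applied. This is only a quick sketch, assuming the same cluster context used in the pipelines and the default namespace:
+
+```
+kubectl --context anna-sandbox@codefresh-support get configmap the-map -o jsonpath='{.data.mysqlDB}'
+```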
+ +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-dashboard.png" +url="/images/examples/deployments/k8s-kustomize-dashboard.png" +alt="Codefresh Kubernetes Deployments" +caption="Codefresh Kubernetes Deployments" +max-width="100%" +%} + +You can test that the application deployed correctly to both environments by accessing the endpoints: + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-staging-endpoint.png" +url="/images/examples/deployments/k8s-kustomize-staging-endpoint.png" +alt="Staging endpoint" +caption="Staging endpoint" +max-width="100%" +%} + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-prod-endpoint.png" +url="/images/examples/deployments/k8s-kustomize-prod-endpoint.png" +alt="Production endpoint" +caption="Production endpoint" +max-width="100%" +%} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Deployment options to Kubernetes]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes) +[Running custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/) +[Deploy with Helm]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/) + diff --git a/_docs/example-catalog/cd-examples/docker-swarm.md b/_docs/example-catalog/cd-examples/docker-swarm.md new file mode 100644 index 000000000..ad6dfbe1f --- /dev/null +++ b/_docs/example-catalog/cd-examples/docker-swarm.md @@ -0,0 +1,221 @@ +--- +title: "Deploy to Docker SWARM" +description: "Deploy to Docker Swarm with Codefresh" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/docker-swarm/ + - /docs/deploy-to-docker-swarm/ + - /docs/deploy-your-containers/docker-swarm/ +toc: true +--- + +Codefresh can easily deploy your application to [Docker Swarm](https://docs.docker.com/engine/swarm/){:target="\_blank"} using [Codefresh pipelines]({{site.baseurl}}/docs/pipelines/pipelines/). + +You will need to provide: + +1. The `docker-stack.yml` that contains the definition of the application +1. The host where your Docker Swarm is running +1. An SSH key that Codefresh can use to access remotely the Docker Swarm host +1. The stack name that will be used once the application is deployed + +All this information will be passed to the pipeline in the form of build parameters. + + +## Example application + +For an example Docker Swarm application, see [https://github.com/codefreshdemo/example-voting-app](https://github.com/codefreshdemo/example-voting-app){:target="\_blank"} + +To launch it locally you need to download [Docker](https://www.docker.com/products/overview){:target="\_blank"}. +If you are on Mac or Windows, [Docker Compose](https://docs.docker.com/compose){:target="\_blank"} is automatically installed. +On Linux, make sure you have the latest version of [Compose](https://docs.docker.com/compose/install/){:target="\_blank"}. + + +Run in this root directory: + +{% highlight bash %} +{% raw %} + +docker-compose up + +{% endraw %} +{% endhighlight %} + +The app runs at [http://localhost:5000](http://localhost:5000), and the results are at [http://localhost:5001](http://localhost:5001). + +Alternately, if you want to run it on a Docker Swarm, first make sure you have a Swarm. 
+If you don't, run: 
+ 
+{% highlight bash %} 
+{% raw %} 
+ 
+docker swarm init 
+ 
+{% endraw %} 
+{% endhighlight %} 
+ 
+Once you have your swarm, in this directory run: 
+ 
+{% highlight bash %} 
+{% raw %} 
+ 
+docker stack deploy --compose-file docker-stack.yml vote 
+ 
+{% endraw %} 
+{% endhighlight %} 
+ 
+{{site.data.callout.callout_warning}} 
+The swarm master must have Python installed. 
+{{site.data.callout.end}} 
+ 
+## Deploy to Remote Swarm with Codefresh 
+ 
+First you need to set up the following environment variables in your Codefresh pipeline: 
+ 
+{: .table .table-bordered .table-hover} 
+| Variable | Description | 
+| -------- | ----------- | 
+| `RDOCKER_HOST` | The remote Docker Swarm master machine, accessible over SSH (for example, ubuntu@ec2-public-ip) | 
+| `STACK_NAME` | The new Docker stack name (for example, `vote`) | 
+| `SSH_KEY` | The private SSH key used to access the Docker Swarm master machine | 
+| `SPLIT_CHAR` | The split character used to replace `newline` in the SSH key. Recommendation: use `,` (the comma character) | 
+ 
+The `SSH_KEY` variable holds the contents of the [SSH key](https://www.ssh.com/ssh/public-key-authentication){:target="\_blank"} that can access the Docker Swarm host. Currently, to pass the SSH key through the Codefresh UI, you need to convert it to a single-line string (replacing `newline` with `comma`), like this: 
+ 
+{% highlight bash %} 
+{% raw %} 
+SSH_KEY=$(cat ~/.ssh/my_ssh_key_file | tr '\n' ',') 
+{% endraw %} 
+{% endhighlight %} 
+ 
+The `SPLIT_CHAR` variable should hold the replacement character that was used for the SSH key (in the example above it is the comma character). 
+ 
+{% include image.html 
+lightbox="true" 
+file="/images/2f1884a-codefresh_env_vars.png" 
+url="/images/2f1884a-codefresh_env_vars.png" 
+alt="Docker Swarm build parameters" 
+caption="Docker Swarm build parameters" 
+max-width="70%" 
+%} 
+ 
+ 
+## Deploy to Docker Swarm with a YAML step 
+ 
+Once you have defined all the variables, deploy to your cluster using the following [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
+ 
+ `codefresh.yml` 
+{% highlight yaml %} 
+{% raw %} 
+ 
+deploy_to_swarm: 
+  image: codefresh/remote-docker 
+  working_directory: ${{main_clone}} 
+  commands: 
+    - rdocker ${{RDOCKER_HOST}} docker stack deploy --compose-file docker-stack.yml ${{STACK_NAME}} 
+  environment: 
+    - SSH_KEY=${{SSH_KEY}} 
+  when: 
+    branch: 
+      only: 
+        - master 
+ 
+{% endraw %} 
+{% endhighlight %} 
+ 
+You can also pass custom credentials like this: 
+ 
+ `codefresh.yml` 
+{% highlight yaml %} 
+{% raw %} 
+ 
+deploy_to_swarm: 
+  image: codefresh/remote-docker 
+  working_directory: ${{main_clone}} 
+  commands: 
+    - rdocker ${{RDOCKER_HOST}} docker login ${{MY_REGISTRY}} -u ${{MY_REGISTRY_USER}} -p ${{MY_REGISTRY_PASSWORD}} \&\& docker stack deploy --compose-file docker-compose.yml --with-registry-auth ${{STACK_NAME}} 
+  environment: 
+    - SSH_KEY=${{SSH_KEY}} 
+  when: 
+    branch: 
+      only: 
+        - master 
+{% endraw %} 
+{% endhighlight %} 
+ 
+ 
+ 
+## Create a CI/CD pipeline for Docker Swarm 
+ 
+Here is the complete pipeline: 
+ 
+{% include 
+image.html 
+lightbox="true" 
+file="/images/examples/docker-swarm/docker-swarm-pipeline.png" 
+url="/images/examples/docker-swarm/docker-swarm-pipeline.png" 
+alt="Docker Swarm pipeline" 
+caption="Docker Swarm pipeline" 
+max-width="100%" 
+%} 
+ 
+And here is the pipeline definition: 
+ 
+ `codefresh.yml` 
+{% highlight yaml %} 
+{% raw %} 
+version: '1.0' 
+stages: 
+  - prepare 
+  - build 
+  - deploy 
+steps: 
+  main_clone: 
+    title: Cloning main repository... 
+ stage: prepare + type: git-clone + repo: 'codefreshdemo/example-voting-app' + revision: master + git: github-1 + MyResultDockerImage: + title: Building Result Docker Image + stage: build + type: build + image_name: resultApp + working_directory: ./result/ + tag: master + dockerfile: Dockerfile + MyVoteDockerImage: + title: Building Vote Docker Image + stage: build + type: build + image_name: voteApp + working_directory: ./vote/ + tag: master + dockerfile: Dockerfile + MyWorkerDockerImage: + title: Building Worker Docker Image + stage: build + type: build + image_name: workedApp + working_directory: ./worker/ + tag: master + dockerfile: Dockerfile + DeployToSwarmNow: + image: codefresh/remote-docker + working_directory: ${{main_clone}} + stage: deploy + commands: + - rdocker ${{RDOCKER_HOST}} docker login ${{MY_REGISTRY}} -u ${{MY_REGISTRY_USER}} -p ${{MY_REGISTRY_PASSWORD}} \&\& docker stack deploy --compose-file docker-compose.yml --with-registry-auth ${{STACK_NAME}} + environment: + - SSH_KEY=${{SSH_KEY}} +{% endraw %} +{% endhighlight %} + +The values of `MY_REGISTRY`, `MY_REGISTRY_USER` and `MY_REGISTRY_PASSWORD` depend upon the type of [your connected registry]({{site.baseurl}}/docs/integration/docker-registries/). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + diff --git a/_docs/example-catalog/cd-examples/elastic-beanstalk.md b/_docs/example-catalog/cd-examples/elastic-beanstalk.md new file mode 100644 index 000000000..cd0b6949d --- /dev/null +++ b/_docs/example-catalog/cd-examples/elastic-beanstalk.md @@ -0,0 +1,136 @@ +--- +title: "Deploy to Elastic Beanstalk" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/elastic-beanstalk/ + - /docs/deploy-your-containers/elastic-beanstalk/ +toc: true +--- + + +## Prerequisites + +- Configured Application in Elastic Beanstalk service
      + See: [http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/GettingStarted.html](http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/GettingStarted.html){:target="_blank"} + + +## Deployment with Codefresh +- Add encrypted environment variables for AWS credentials: + * `AWS_ACCESS_KEY_ID` + * `AWS_SECRET_ACCESS_KEY` + +- Provide the following environment variables: + * `AWS_REGION` + * `AWS_ENV_NAME` + * `AWS_VERSION` + * `AWS_BRANCH` + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png" +alt="codefresh_eb_env_vars.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +{% raw %} +The ``${{AWS_VERSION}}`` of application you can find in the Elastic Beanstalk service. +{% endraw %} +{{site.data.callout.end}} + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_version_label.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_version_label.png" +alt="codefresh_eb_version_label.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +{% raw %} +The ``${{AWS_ENV_NAME}}`` of application you can find in the Elastic Beanstalk service. +{% endraw %} +{{site.data.callout.end}} + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_environment.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_environment.png" +alt="codefresh_eb_environment.png" +max-width="40%" +%} + +Add the following step to codefresh.yml: + + `deploy_step` +{% highlight yaml %} +{% raw %} +deploy-elastic-beanstalk: + fail-fast: false + image: garland/aws-cli-docker:latest + commands: + - sh -c "aws configure set region '${{AWS_REGION}}' && aws elasticbeanstalk update-environment --environment-name '${{AWS_ENV_NAME}}' --version-label '${{AWS_VERSION}}' " + when: + condition: + all: + masterBranch: "'${{CF_BRANCH}}' == '${{AWS_BRANCH}}'" +{% endraw %} +{% endhighlight %} + +{:.text-secondary} +## Deployment Flow +- Go to the Elastic Beanstalk service and create an application and environment. + + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png" +alt="codefresh_eb_environment.png" +max-width="40%" +%} + +- Perform the following commands from root of your project: + * eb init + * eb create {% raw %}`${{AWS_ENV_NAME}}`{% endraw %} + + + +>Note: + If you don't have awsebcli - install EB CLI [http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3-install.html](http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3-install.html){:target="_blank"}. 
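+Once the environment is up, you can also check its health from the command line before wiring it into Codefresh, as in the screenshot below. This is only a sketch; it assumes the same `AWS_REGION` and `AWS_ENV_NAME` values used above, and an AWS CLI already configured with your credentials:
+
+{% highlight bash %}
+{% raw %}
+aws elasticbeanstalk describe-environment-health --region "$AWS_REGION" --environment-name "$AWS_ENV_NAME" --attribute-names All
+{% endraw %}
+{% endhighlight %}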
+ 
+ 
+{% include 
+image.html 
+lightbox="true" 
+file="/images/examples/elastic-beanstalk/codefresh_eb_health.png" 
+url="/images/examples/elastic-beanstalk/codefresh_eb_health.png" 
+alt="codefresh_eb_health.png" 
+max-width="40%" 
+%} 
+ 
+- Add this repository to Codefresh, provide the necessary environment variables, and build this service 
+ 
+{% include 
+image.html 
+lightbox="true" 
+file="/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png" 
+url="/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png" 
+alt="codefresh_eb_cf_step_deploy.png" 
+max-width="40%" 
+%} 
+ 
+## Example 
+ 
+* [cf-example-deploy-elasticbeanstalk](https://github.com/codefreshdemo/cf-example-deploy-elasticbeanstalk){:target="_blank"} 
+ 
+ 
+## Related articles 
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) 
diff --git a/_docs/example-catalog/cd-examples/helm.md b/_docs/example-catalog/cd-examples/helm.md 
new file mode 100644 
index 000000000..1b104663f 
--- /dev/null 
+++ b/_docs/example-catalog/cd-examples/helm.md 
@@ -0,0 +1,225 @@ 
+--- 
+title: "Deploy with Helm" 
+description: "Use Helm in a Codefresh pipeline" 
+group: example-catalog 
+sub_group: cd-examples 
+toc: true 
+--- 
+ 
+[Helm](https://helm.sh/){:target="\_blank"} is the package manager for Kubernetes. 
+Codefresh has comprehensive support for Helm: 
+ 
+* Free [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) with each Codefresh account 
+* [Helm chart dashboard]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) to track your charts 
+* [Helm Release dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) to view your deployments 
+* [Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) to view Helm releases 
+* [Helm promotion dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) to promote Helm releases 
+* Add any external Helm repository on any other cloud provider 
+ 
+Codefresh also provides a [pipeline step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) for deploying with Helm. 
+ 
+For more insights on Helm charts, see also our [Helm best practices]({{site.baseurl}}/docs/new-helm/helm-best-practices/) guide. 
+ 
+ 
+## The example Helm project 
+ 
+You can see the example project at [https://github.com/codefresh-contrib/helm-sample-app](https://github.com/codefresh-contrib/helm-sample-app){:target="\_blank"}. The repository contains a simple Go application, a Dockerfile, and an example chart. 
+ 
+ 
+## Prerequisites 
+ 
+[At least one Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) in your Codefresh account. 
+ 
+>Note that if you still use Helm 2, you should also have installed the server side of Helm 2 (Tiller) using `helm init`. This command is best run from the cloud console of your cluster. The respective pipelines of this guide are in the [helm-2 branch](https://github.com/codefresh-contrib/helm-sample-app/tree/helm-2){:target="\_blank"}. 
+ 
+ 
+ 
+## CI/CD pipeline with Helm deployment 
+ 
+It is possible to deploy a Helm chart directly as it exists on the filesystem. This is not the recommended way to use Helm, because you are bypassing the Helm chart repository, but it is certainly the simplest Helm pipeline possible. 
+ +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-deploy-pipeline.png" +url="/images/examples/helm/helm-deploy-pipeline.png" +alt="Pipeline for Helm deployment" +caption="Pipeline for Helm deployment" +max-width="100%" +%} + +Here is the whole pipeline: + + `codefresh-do-not-store.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + clone: + title: Cloning main repository... + stage: prepare + type: git-clone + arguments: + repo: codefresh-contrib/helm-sample-app + revision: master + git: github + build: + title: Building Docker Image + stage: build + type: build + working_directory: ./helm-sample-app + arguments: + image_name: helm-sample-app-go + tag: multi-stage + dockerfile: Dockerfile + deploy: + title: Deploying Helm Chart + type: helm + stage: deploy + working_directory: ./helm-sample-app + arguments: + action: install + chart_name: charts/helm-example + release_name: my-go-chart-prod + helm_version: 3.0.2 + kube_context: my-demo-k8s-cluster + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'image_tag=multi-stage' + - 'replicaCount=3' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +1. Builds a docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) +1. Deploys the Helm chart to a cluster named `my-demo-k8s-cluster` using the Helm step [from the Step Marketplace](https://codefresh.io/steps/step/helm){:target=\_blank"}. + +In this example, `charts/helm-example` refers to the [filesystem location in the code](https://github.com/codefresh-contrib/helm-sample-app/tree/master/charts/helm-example){:target=\_blank"} that was just checked out. + +The deployment will be visible in the [Helm releases dashboard]({{site.baseurl}}/docs/new-helm/helm-releases-management/). + +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-release.png" +url="/images/examples/helm/helm-release.png" +alt="Helm release view" +caption="Helm release view" +max-width="100%" +%} + +If you want to run this example yourself, make sure to edit the chart and put your own values there for the Docker image. + +## CI/CD pipeline with Helm deployment that also stores the chart + +It is recommended to use a Helm repository to store your chart before deploying it. This way you know what is deployed in your clusters +and you can also reuse charts in other installations. + +First of all you need to import in your pipeline from the [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) the settings for the internal Helm repository (or any other external repository that you have setup in Codefresh). + This will make available the internal Helm repository to your pipeline so that it can push/pull Helm charts from it. + + {% include image.html + lightbox="true" + file="/images/examples/helm/import-helm-configuration.png" + url="/images/examples/helm/import-helm-configuration.png" + alt="Using the default Helm repository in a Pipeline" + caption="Using the default Helm repository in a Pipeline" + max-width="40%" + %} + +Once that is done you can change your pipeline to also store the chart first and *then* deploy it. 
+ + +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-push-and-deploy-pipeline.png" +url="/images/examples/helm/helm-push-and-deploy-pipeline.png" +alt="Pipeline for Helm deployment that stores chart" +caption="Pipeline for Helm deployment that stores chart" +max-width="100%" +%} + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - store + - deploy +steps: + clone: + title: Cloning main repository... + stage: prepare + type: git-clone + arguments: + repo: codefresh-contrib/helm-sample-app + revision: master + git: github + build: + title: Building Docker Image + stage: build + type: build + working_directory: ./helm-sample-app + arguments: + image_name: helm-sample-app-go + tag: multi-stage + dockerfile: Dockerfile + store: + title: Storing Helm Chart + type: helm + stage: store + working_directory: ./helm-sample-app + arguments: + action: push + chart_name: charts/helm-example + kube_context: my-demo-k8s-cluster + deploy: + type: helm + stage: deploy + working_directory: ./helm-sample-app + arguments: + action: install + chart_name: charts/helm-example + release_name: my-go-chart-prod + helm_version: 3.0.2 + kube_context: my-demo-k8s-cluster + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'image_tag=multi-stage' + - 'replicaCount=3' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} + + +After you finish running your pipeline, not only the deployment will take place, but you will also see your chart in your [Helm Chart dashboard]({{site.baseurl}}/docs/new-helm/add-helm-repository/): + +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-chart.png" +url="/images/examples/helm/helm-chart.png" +alt="Stored Helm chart" +caption="Stored Helm chart" +max-width="80%" +%} + +It is also possible to [run your own Helm commands]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#example-custom-helm-commands) in a Codefresh pipeline. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/cd-examples/import-data-to-mongodb.md b/_docs/example-catalog/cd-examples/import-data-to-mongodb.md new file mode 100644 index 000000000..68a6c79a3 --- /dev/null +++ b/_docs/example-catalog/cd-examples/import-data-to-mongodb.md @@ -0,0 +1,60 @@ +--- + +title: "Import data to MongoDB" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/import-data-to-mongodb-in-composition/ + - /docs/on-demand-test-environment/example-compositions/import-data-to-mongodb/ +toc: true +--- + +If you want to import/restore or to do something else before using MongoDB in your application, you can look at the following example. + +You just need to create Dockerfile for mongo seed service and provide the command to prepare MongoDB. 
In this case it's command `mongoimport` + + `Dockerfile mongo_seed` +{% highlight docker %} +FROM mongo +COPY init.json /init.json +CMD mongoimport --host mongodb --db exampleDb --collection contacts --type json --file /init.json --jsonArray +{% endhighlight %} + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. +Let's quickly review the contents of this file: + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + mongodb: + image: mongo + command: mongod --smallfiles + ports: + - 27017 + + mongo_seed: + image: ${{mongo_seed}} + links: + - mongodb + + client: + image: ${{build_prj}} + links: + - mongodb + ports: + - 9000 + environment: + - MONGO_URI=mongodb:27017/exampleDb +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +You can add the following example to your GitHub or Bitbucket account, and build the [example](https://github.com/codefreshdemo/cf-example-manage-mongodb){:target="_blank"}. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/nodejs-angular2-mongodb.md b/_docs/example-catalog/cd-examples/nodejs-angular2-mongodb.md new file mode 100644 index 000000000..f4e698393 --- /dev/null +++ b/_docs/example-catalog/cd-examples/nodejs-angular2-mongodb.md @@ -0,0 +1,52 @@ +--- +title: "NodeJS + Angular2 + MongoDB" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/nodejs-angular2-mongodb/ + - /docs/on-demand-test-environment/example-compositions/nodejs-angular2-mongodb/ +toc: true +--- +This tutorial will walk you through the process of adding the following: + +- Build client +- Build server +- Launch composition + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. +Let's quickly review the contents of this file: + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + mongodb: + image: mongo + ports: + - 28017 + server: + image: ${{build_server}} + environment: + - MONGO_URI=mongodb://mongodb/exampleDb + links: + - mongodb + ports: + - 9000 + client: + image: ${{build_client}} + ports: + - 3000 +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/nodejs-angular2-mongo){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/nomad.md b/_docs/example-catalog/cd-examples/nomad.md new file mode 100644 index 000000000..a7e78d797 --- /dev/null +++ b/_docs/example-catalog/cd-examples/nomad.md @@ -0,0 +1,225 @@ +--- +title: "Deploy to Nomad" +description: "Deploy Docker images to a Nomad cluster with Codefresh" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +Even though Codefresh has great support for Kubernetes and Helm deployments, there is no lock-in on using just Kubernetes. Codefresh can deploy on any infrastructure. + + +[Nomad](https://www.nomadproject.io/){:target=\_blank"} is an alternative scheduling platform from Hashicorp. It supports docker containers (like Kubernetes), but you can also use Nomad to schedule VMs, Java apps, Go apps or any other standalone executable. 
+ +There are several public Docker Images with Nomad, so it is very easy to use Codefresh pipelines to deploy to a Nomad cluster. + + +{% include image.html +lightbox="true" +file="/images/examples/nomad/nomad-ci-pipeline.png" +url="/images/examples/nomad/nomad-ci-pipeline.png" +alt="Deploying to Nomad with Codefresh" +caption="Deploying to Nomad with Codefresh" +max-width="80%" +%} + +In this example, we will use the image at [https://hub.docker.com/r/djenriquez/nomad](https://hub.docker.com/r/djenriquez/nomad){:target=\_blank"}. + +## The example Nomad project + +You can see the example project at [https://github.com/codefresh-contrib/nomad-sample-app](https://github.com/codefresh-contrib/nomad-sample-app){:target=\_blank"}. The repository contains a simple job specification that deploys a docker container on nomad cluster. + + +Here is the whole job file: + + `docker-job.hcl` +{% highlight hcl %} +{% raw %} +job "example-job" { + # Specify this job should run in the region named "us". Regions + # are defined by the Nomad servers' configuration. + #region = "us" + + # Spread the tasks in this job between us-west-1 and us-east-1. + datacenters = ["dc1"] + + # Run this job as a "service" type. Each job type has different + # properties. See the documentation below for more examples. + type = "service" + + # Specify this job to have rolling updates, two-at-a-time, with + # 30 second intervals. + update { + stagger = "30s" + max_parallel = 1 + } + + # A group defines a series of tasks that should be co-located + # on the same client (host). All tasks within a group will be + # placed on the same host. + group "example-group" { + # Specify the number of these tasks we want. + count = 3 + + # Create an individual task (unit of work). This particular + # task utilizes a Docker container to front a web application. + task "example-task" { + # Specify the driver to be "docker". Nomad supports + # multiple drivers. + driver = "docker" + + # Configuration is specific to each driver. + config { + image = "r.cfcr.io/$CF_ACCOUNT/$CF_REPO_NAME:$CF_BRANCH_TAG_NORMALIZED" + + auth { + username = "$CF_ACCOUNT" + password = "$CFCR_LOGIN_TOKEN" + server_address = "r.cfcr.io" + } + + port_map { + http = 8080 + } + } + + # The service block tells Nomad how to register this service + # with Consul for service discovery and monitoring. + service { + # This tells Consul to monitor the service on the port + # labelled "http". Since Nomad allocates high dynamic port + # numbers, we use labels to refer to them. + port = "http" + + check { + type = "http" + path = "/" + interval = "10s" + timeout = "2s" + } + } + + # Specify the maximum resources required to run the task, + # include CPU, memory, and bandwidth. + resources { + cpu = 500 # MHz + memory = 128 # MB + + network { + mbits = 100 + + + port "http" {} + + + + } + } + } + } +} + +{% endraw %} +{% endhighlight %} + +Notice that the job specification has several [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/) embedded. We will use [envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html){:target=\_blank"} in our pipeline to replace +them with the correct values. + +## Prerequisites + +You need to create a Codefresh account and have a Nomad cluster running. You need to decide on how Codefresh will communicate +with the nomad cluster. In this simple example we just use the `NOMAD_ADDR` variable to point the nomad client to our cluster. 
In a production environment you should use proper [ACL](https://www.nomadproject.io/guides/security/acl.html){:target=\_blank"} and [certificate](https://www.nomadproject.io/guides/security/securing-nomad.html){:target=\_blank"} variables as well. + + +In this example the Nomad cluster is already setup on a VM at Google cloud. + +You also need to create a [token for the Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/) so that Nomad can pull your private images on the cluster. + +## Create a CI/CD pipeline for Nomad deployments + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "deploy" +steps: + main_clone: + type: "git-clone" + title: "Clone main repository..." + repo: "codefresh-contrib/nomad-sample-app" + revision: "${{CF_BRANCH}}" + stage: "clone" + build: + title: "Building Docker Image" + type: "build" + image_name: "nomad-sample-app" + tag: "${{CF_BRANCH_TAG_NORMALIZED}}" + dockerfile: "Dockerfile" + stage: "build" + prepareJob: + title: "Preparing Nomad job" + image: bhgedigital/envsubst + stage: deploy + commands: + - envsubst < docker-job.hcl > docker-job-export.hcl + - cat docker-job-export.hcl + runJob: + title: "Deploying Nomad job" + image: djenriquez/nomad + stage: deploy + commands: + - nomad run docker-job-export.hcl +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Creates a Docker image for a simple Go application through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). The image is automatically pushed to the default Docker registry. +1. Replaces all variables in the job spec by running `envsubst`. These include: + * The Registry token so that Nomad can access the default Docker registry + * The docker image name and tag to be deployed +1. Runs the job to deploy the image to Nomad through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + + +Run the pipeline and see your deployment succeed. + +Here are the environment variables defined for this pipeline. + +{% include image.html +lightbox="true" +file="/images/examples/nomad/nomad-variables.png" +url="/images/examples/nomad/nomad-variables.png" +alt="Pipeline variables for Nomad deployments" +caption="Pipeline variables for Nomad deployments" +max-width="50%" +%} + + +The `NOMAD_ADDR` variable is holding the URL of the cluster. The `CFCR_LOGIN_TOKEN` variable holds authentication for the Codefresh Docker registry. + +## Verify the deployment + +Nomad also comes with its own UI that can show you the result of a deployment. + +{% include image.html +lightbox="true" +file="/images/examples/nomad/nomad-ui-deployment.png" +url="/images/examples/nomad/nomad-ui-deployment.png" +alt="Nomad UI deployment" +caption="Nomad UI deployment" +max-width="80%" +%} + +You can also use [Terraform]({{site.baseurl}}/docs/example-catalog/cd-examples/terraform/) in Codefresh pipelines. 
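+
+If you prefer to verify the deployment from the pipeline itself instead of the Nomad UI, you can append one more [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that queries the job with the same Nomad image. The snippet below is only a sketch; it assumes the job name `example-job` from the specification above and reuses the `NOMAD_ADDR` pipeline variable:
+
+  `codefresh.yml` (additional step)
+{% highlight yaml %}
+{% raw %}
+  verifyJob:
+    title: "Checking Nomad job status"
+    image: djenriquez/nomad
+    stage: deploy
+    commands:
+      # prints the allocations and their status for the job that was just submitted
+      - nomad status example-job
+{% endraw %}
+{% endhighlight %}
+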
+ +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/packer-gcloud.md b/_docs/example-catalog/cd-examples/packer-gcloud.md new file mode 100644 index 000000000..58d12aded --- /dev/null +++ b/_docs/example-catalog/cd-examples/packer-gcloud.md @@ -0,0 +1,132 @@ +--- +title: "Deploy to a Virtual Machine" +description: "Deploy to Google Cloud in a Codefresh pipeline with Packer" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +Even though Codefresh is Kubernetes-native and designed for containers, it can still deploy traditional applications in the form of Virtual Machines to any Cloud provider. + +In this example, we will use [Packer](http://www.packer.io/){:target="\_blank"} to package an application into a VM disk image that will then be launched in Google Cloud. +Because Packer itself is already offered [in a Docker container](https://hub.docker.com/r/hashicorp/packer/){:target="\_blank"}, it is very easy to run Packer in a Codefresh pipeline. + +Google also offers a [Docker image for GCloud](https://hub.docker.com/r/google/cloud-sdk/){:target="\_blank"} making the launching of the VM straightforward in a Codefresh pipeline. + + +{% include image.html +lightbox="true" +file="/images/examples/packer-gcloud/packer-codefresh-pipeline.png" +url="/images/examples/packer-gcloud/packer-codefresh-pipeline.png" +alt="Running Packer inside Codefresh" +caption="Running Packer inside Codefresh" +max-width="80%" +%} + +This Codefresh pipeline creates a VM image and then uses it to launch a Google Compute instance. + + +## The example Packer/Gcloud project + +You can see the example project at [https://github.com/codefresh-contrib/vm-packer-sample-app](https://github.com/codefresh-contrib/vm-packer-sample-app){:target="\_blank"}. The repository contains a simple Go application as well as a packer template. + +You can play with it locally after installing the `packer` and `gcloud` executables. + +## Prerequisites + +You need to create a Codefresh account and a Google account first. Then you need to create a [Service account Key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys){:target="\_blank"} which will allow `packer` and `gcloud` to communicate with Google cloud. + + +Add your service account json as a pipeline variable called `SERVICE_ACCOUNT`. The content of this variable will be used +in order to authenticate to Google cloud. + +{% include image.html +lightbox="true" +file="/images/examples/packer-gcloud/service-account-variable.png" +url="/images/examples/packer-gcloud/service-account-variable.png" +alt="Using a Service Account JSON in Codefresh" +caption="Using a Service Account JSON in Codefresh" +max-width="50%" +%} + +## Create a CI/CD pipeline for Packer/GCloud + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + main_clone: + title: 'Cloning main repository...' 
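+    # Note: 'git: github' below is the name of the Git integration in your Codefresh account;
+    # adjust it if your integration is named differently.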
+ type: git-clone + repo: 'codefresh-contrib/vm-packer-sample-app' + git: github + revision: 'master' + stage: prepare + SetupAuth: + title: 'Setup GCloud Auth' + image: 'alpine' + stage: prepare + commands: + - echo $SERVICE_ACCOUNT > account.json + BuildMyApp: + title: Compiling App code + stage: build + image: 'golang:1.12' + commands: + - go build -o sample src/sample/trivial-web-server.go + CreatePackerImage: + title: Baking VM image + stage: build + image: 'hashicorp/packer' + commands: + - packer validate my-google-cloud-example.json + - packer build -force my-google-cloud-example.json + DeployToVM: + title: Deploying to VM + stage: deploy + image: 'google/cloud-sdk' + commands: + - gcloud auth activate-service-account --key-file=account.json + - gcloud config set project firstkubernetes-176201 + - gcloud compute instances create packer-demo-codefresh --image codefresh-simple-ubuntu-vm --zone europe-west1-b --metadata-from-file startup-script=startup.sh --tags http-server --preemptible --quiet + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Saves the content of the variable that holds the Google account as a file called `account.json`. +1. Compiles the Go application through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Runs `packer` to create a VM image based on Ubuntu that also contains the simple Go application. +1. Runs `gcloud` to launch a VM with the image that was just created. + + +Run the pipeline and see your deployment succeed. You can customize the image by editing the [Packer template](https://github.com/codefresh-contrib/vm-packer-sample-app/blob/master/my-google-cloud-example.json){:target="\_blank"}. + +Once the VM has finished launching you can access it with your web browser. + +{% include image.html +lightbox="true" +file="/images/examples/packer-gcloud/web-app-url.png" +url="/images/examples/packer-gcloud/web-app-url.png" +alt="Accessing the VM application" +caption="Accessing the VM application" +max-width="70%" +%} + + +You can follow the same procedure for any other cloud that has an API/CLI (such as AWS, Azure, Digital Ocean etc). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/pulumi.md b/_docs/example-catalog/cd-examples/pulumi.md new file mode 100644 index 000000000..74c1f3f74 --- /dev/null +++ b/_docs/example-catalog/cd-examples/pulumi.md @@ -0,0 +1,116 @@ +--- +title: "Deploy with Pulumi" +description: "Use Pulumi in a Codefresh pipeline with Docker" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +[Pulumi](https://pulumi.io/){:target="\_blank"} is a platform for *Infrastructure as Code*. It works like Terraform but allows you to use a proper programming language (TypeScript, Python, Go) to describe your infrastructure (instead of a configuration language). + +You can use Pulumi to deploy to Kubernetes or any other supported cloud platform. Because Pulumi itself is already offered [in a Docker container](https://hub.docker.com/r/pulumi/pulumi), it is very easy to run Pulumi in a Codefresh pipeline. 
+ + +{% include image.html +lightbox="true" +file="/images/examples/pulumi/pulumi-pipeline.png" +url="/images/examples/pulumi/pulumi-pipeline.png" +alt="Running Pulumi inside Codefresh" +caption="Running Pulumi inside Codefresh" +max-width="80%" +%} + +## The example Pulumi project + +You can see the example project at [https://github.com/codefresh-contrib/pulumi-sample-app](https://github.com/codefresh-contrib/pulumi-sample-app){:target="\_blank"}. The repository contains a simple Pulumi stack based on Kubernetes and TypeScript. + +You can play with it locally after installing the `pulumi` executable. + +## Prerequisites + +You need to create a Codefresh account and a Pulumi account first. Then you need to create a [Pulumi token](https://app.pulumi.com/account/tokens){:target="\_blank"} which will allows Codefresh to communicate with Pulumi. + +[Add a Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) in your Codefresh account from any cloud provider. + +Codefresh automatically creates a kubeconfig in any [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) with all your clusters. This is the same way that Pulumi communicated with Kubernetes, so the integration between Codefresh and Pulumi is ready out of the box. + +Create a [stack](https://pulumi.io/reference/stack.html){:target="\_blank"} in Pulumi or use the one provided in the example. + +Finally add you Pulumi token as a pipeline variable called `PULUMI_ACCESS_TOKEN`. All freestyle steps have automatic access to all pipeline variables, and Pulumi will search for a token by default with this name when logging in. + + +## Create a CI/CD pipeline for Pulumi + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: prepare + git: github-1 + BuildProject: + title: Build project + stage: build + image: pulumi/pulumi + commands: + - yarn install + SelectMyCluster: + title: Select K8s cluster + stage: deploy + image: codefresh/kubectl:1.13.3 + commands: + - kubectl config get-contexts + - kubectl config use-context "kostis-demo@FirstKubernetes" + RunPulumi: + title: Deploying + stage: deploy + image: pulumi/pulumi + commands: + - pulumi stack select dev --non-interactive + - pulumi stack --non-interactive + - pulumi up --non-interactive +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/pipelines/git-clone/). +1. Runs `yarn install` to download dependencies. In this example we use TypeScript, but Go and Python would work as well (or any other language supported by Pulumi). +1. Chooses the cluster that will be used for deployments, if you have more than one. Use your own cluster name as seen in the [Kubernetes dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/) of Codefresh. +1. Runs `pulumi up` with the same target cluster. + +The pipeline needs a [single environment variable]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) that holds the content of your Pulumi Token. 
+ + +{% include image.html +lightbox="true" +file="/images/examples/pulumi/pulumi-access-token.png" +url="/images/examples/pulumi/pulumi-access-token.png" +alt="Passing the Pulumi Token in the pipeline parameters" +caption="Passing the Pulumi Token in the pipeline parameters" +max-width="60%" +%} + +Run the pipeline and see your deployment succeed. + +## Handling Pull requests + +You can easily use the same pipeline or a different one for pull requests. In this case replace the `pulumi up` command with `pulumi preview`. Even better you can add an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to allows humans to inspect the pipeline first. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth.md b/_docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth.md new file mode 100644 index 000000000..b7e2884cd --- /dev/null +++ b/_docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth.md @@ -0,0 +1,92 @@ +--- +title: "Secure a Docker Container using HTTP Basic Auth" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/securing-docker-container-with-http-basic-auth/ + - /docs/on-demand-test-environment/examples-compositions/securing-docker-container-with-http-basic-auth/ + - /docs/on-demand-test-environment/example-compositions/secure-a-docker-container-using-http-basic-auth/ +toc: true +--- +Before making a product publicly available, you might want to restrict access to certain users. These are some options to accomplish this goal: + + - Implement custom authentication within the system + - Configure the server to act as a proxy between the user and the application + - Limit access to specific IP addresses + +This article explains how to secure a container by exposing public ports, using an extra NGINX container to act as a proxy. + +## Expose Web App Public Port + + `webapp` +{% highlight yaml %} +{% raw %} +version: '3' +services: + web: + image: codefreshio/webapp + ports: + - "3000" +{% endraw %} +{% endhighlight %} + +The architecture for this step is displayed in the diagram below. In this step example, Docker is forwarding an internal 3000 port to the host 80 port. + +{% include +image.html +lightbox="true" +file="/images/examples/docker-https/codefresh_webapp_container.png" +url="/images/examples/docker-https/codefresh_webapp_container.png" +alt="codefresh_webapp_container.png" +max-width="40%" +%} + +## Add NGINX Proxy +To secure the web-app we are going to specify these commands in the ```docker-compose.yml``` file. + +1. Remove the port that maps from the web-app (it won't be directly accessible) +2. Add an extra NGINX container with custom configuration (proxy all traffic) +3. Configure NGINX to communicate with the web-app + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + web: + image: ${{build-prj}} + auth: + image: ${{build-nginx}} + ports: + - 80 + links: + - web + environment: + USER: ${{USERNAME}} + PASS: ${{PASSWORD}} +{% endraw %} +{% endhighlight %} + +The architecture for the ```docker-compose.yml``` file is displayed in the diagram below. 
+ +{% include +image.html +lightbox="true" +file="/images/examples/docker-https/codefresh_nginx_container.png" +url="/images/examples/docker-https/codefresh_nginx_container.png" +alt="codefresh_nginx_container.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/cf-example-basic-auth-container){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper.md b/_docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper.md new file mode 100644 index 000000000..2134ff171 --- /dev/null +++ b/_docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper.md @@ -0,0 +1,203 @@ +--- +title: "Spring Boot + Kafka + Zookeeper" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/spring-boot-kafka-zookeeper/ + - /docs/on-demand-test-environment/example-compositions/spring-boot-kafka-zookeeper/ +toc: true +--- +This project uses `Java, Spring Boot, Kafka, Zookeeper` to show you how to integrate these services in the composition. + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/example-springboot-kafka){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Zookeeper Docker image + +Kafka uses ZooKeeper so you need to first start a ZooKeeper server if you don't already have one + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} + zookeeper: + image: wurstmeister/zookeeper + ports: + - "2181:2181" +{% endraw %} +{% endhighlight %} + +## Kafka Docker image +Now start the Kafka server. In the `docker-compose.yml` it can be something like this + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} + kafka: + build: + context: kafka + dockerfile: Dockerfile + links: + - zookeeper:zk + ports: + - "9092:9092" + environment: + KAFKA_ADVERTISED_HOST_NAME: $CF_HOST_IP + KAFKA_ZOOKEEPER_CONNECT: zk:2181 + KAFKA_MESSAGE_MAX_BYTES: 2000000 + KAFKA_CREATE_TOPICS: "Topic1:1:1" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + depends_on: + - zookeeper +{% endraw %} +{% endhighlight %} + +To start the Kafka server with the certain per-configuration, you need to use Environment variables. Below, you can see which Environment variables are available for this service. + +__Broker IDs__ + +You can configure the broker id in different ways: + +1. Explicitly, using ```KAFKA_BROKER_ID``` +2. Via a command, using ```BROKER_ID_COMMAND```, e.g. ```BROKER_ID_COMMAND: "hostname | awk -F'-' '{print $2}'"``` + +If you don't specify a broker id in your docker-compose file, it will automatically be generated (see [https://issues.apache.org/jira/browse/KAFKA-1070](https://issues.apache.org/jira/browse/KAFKA-1070){:target="_blank"}. This allows scaling up and down. In this case it is recommended to use the ```--no-recreate``` option of docker-compose to ensure that containers are not re-created and thus keep their names and ids. 
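+
+As a concrete sketch, the relevant part of the ```docker-compose.yml``` could look like this (the other environment variables shown earlier are omitted for brevity, and you would normally pick only one of the two options):
+
+  `docker-compose.yml`
+{% highlight yaml %}
+{% raw %}
+  kafka:
+    build:
+      context: kafka
+      dockerfile: Dockerfile
+    environment:
+      # Option 1: pin the broker id explicitly
+      KAFKA_BROKER_ID: 1
+      # Option 2: derive it from the container hostname instead
+      # BROKER_ID_COMMAND: "hostname | awk -F'-' '{print $2}'"
+{% endraw %}
+{% endhighlight %}
+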
+
+
+__Automatically create topics__
+
+If you want to have kafka-docker automatically create topics in Kafka during
+creation, a ```KAFKA_CREATE_TOPICS``` environment variable can be
+added in ```docker-compose.yml```.
+
+Here is an example snippet from ```docker-compose.yml```:
+
+        environment:
+          KAFKA_CREATE_TOPICS: "Topic1:1:3,Topic2:1:1:compact"
+
+```Topic 1``` will have 1 partition and 3 replicas, ```Topic 2``` will have 1 partition, 1 replica and a `cleanup.policy` set to `compact`.
+
+__Advertised hostname__
+
+You can configure the advertised hostname in different ways:
+
+1. Explicitly, using ```KAFKA_ADVERTISED_HOST_NAME```
+2. Via a command, using ```HOSTNAME_COMMAND```, e.g. ```HOSTNAME_COMMAND: "route -n | awk '/UG[ \t]/{print $$2}'"```
+
+When using commands, make sure you review the "Variable Substitution" section in [https://docs.docker.com/compose/compose-file/](https://docs.docker.com/compose/compose-file/){:target="_blank"}.
+
+If ```KAFKA_ADVERTISED_HOST_NAME``` is specified, it takes precedence over ```HOSTNAME_COMMAND```.
+
+For AWS deployment, you can use the Metadata service to get the container host's IP:
+```
+HOSTNAME_COMMAND=wget -t3 -T2 -qO- http://169.254.169.254/latest/meta-data/local-ipv4
+```
+Reference: [http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html){:target="_blank"}
+
+__JMX__
+
+For monitoring purposes, you may wish to configure JMX. In addition to the standard JMX parameters, problems could arise from the underlying RMI protocol used to connect:
+
+* java.rmi.server.hostname - the interface to bind the listening port to.
+* com.sun.management.jmxremote.rmi.port - the port used to service RMI requests.
+
+For example, to connect to a Kafka broker running locally (assuming port 1099 is exposed):
+
+    KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099"
+    JMX_PORT: 1099
+
+## Spring Boot + Kafka
+Then grab the spring-kafka JAR and all of its dependencies - the easiest way to do that is to declare a dependency in your build tool, e.g. for Maven:
+
+  `pom.xml` (snippet)
+{% highlight xml %}
+{% raw %}
+<dependency>
+  <groupId>org.springframework.kafka</groupId>
+  <artifactId>spring-kafka</artifactId>
+  <version>${spring-kafka.version}</version>
+</dependency>
+<dependency>
+  <groupId>org.springframework.kafka</groupId>
+  <artifactId>spring-kafka-test</artifactId>
+  <version>${spring-kafka.version}</version>
+  <scope>test</scope>
+</dependency>
+{% endraw %}
+{% endhighlight %}
+
+Using plain Java to send and receive a message:
+
+  `Java`
+{% highlight java %}
+{% raw %}
+private static String BOOT_TOPIC = "boot.t";
+
+@Autowired
+private Sender sender;
+
+@Autowired
+private Receiver receiver;
+
+@ClassRule
+public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, BOOT_TOPIC);
+
+@BeforeClass
+public static void setUpBeforeClass() throws Exception {
+  System.setProperty("spring.kafka.bootstrap-servers", embeddedKafka.getBrokersAsString());
+}
+
+@Test
+public void testReceive() throws Exception {
+  sender.send(BOOT_TOPIC, "Hello Boot!");
+
+  receiver.getLatch().await(10000, TimeUnit.MILLISECONDS);
+  assertThat(receiver.getLatch().getCount()).isEqualTo(0);
+}
+{% endraw %}
+{% endhighlight %}
+
+Maven will download the needed dependencies, compile the code, and run the unit test case. The result should be a successful build during which the following logs are generated:
+
+  `Java`
+{% highlight java %}
+{% raw %}
+.
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v1.5.2.RELEASE) + +08:36:56.175 [main] INFO c.c.kafka.SpringKafkaApplicationTest - Starting SpringKafkaApplicationTest on cnf-pc with PID 700 (started by CodeNotFound in c:\code\st\spring-kafka\spring-kafka-avro) +08:36:56.175 [main] INFO c.c.kafka.SpringKafkaApplicationTest - No active profile set, falling back to default profiles: default +08:36:56.889 [main] INFO c.c.kafka.SpringKafkaApplicationTest - Started SpringKafkaApplicationTest in 1.068 seconds (JVM running for 5.293) +08:36:58.223 [main] INFO c.codenotfound.kafka.producer.Sender - sending user='{"name": "John Doe", "favorite_number": null, "favorite_color": "green"}' +08:36:58.271 [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-L-1] INFO c.c.kafka.consumer.Receiver - received user='{"name": "John Doe", "favorite_number": null, "favorite_color": "green"}' +08:37:00.240 [main] ERROR o.a.zookeeper.server.ZooKeeperServer - ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes +Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.871 sec - in com.codenotfound.kafka.SpringKafkaApplicationTest + +Results: + +Tests run: 3, Failures: 0, Errors: 0, Skipped: 0 + +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 41.632 s +[INFO] Finished at: 2017-04-17T08:37:31+02:00 +[INFO] Final Memory: 18M/212M +[INFO] ------------------------------------------------------------------------ +{% endraw %} +{% endhighlight %} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/terraform.md b/_docs/example-catalog/cd-examples/terraform.md new file mode 100644 index 000000000..0dd05f466 --- /dev/null +++ b/_docs/example-catalog/cd-examples/terraform.md @@ -0,0 +1,113 @@ +--- +title: "Deploy with Terraform" +description: "Use Terraform in a Codefresh pipeline with Docker" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +[Terraform](https://www.terraform.io/){:target="\_blank"} is a platform for *Infrastructure as Code*. It allows you to describe your cloud infrastructure in a declarative manner. + +You can use Terraform to deploy to Kubernetes or any other supported cloud platform. Because Terraform itself is already offered [in a Docker container](https://hub.docker.com/r/hashicorp/terraform/){:target="\_blank"}, it is very easy to run Terraform in a Codefresh pipeline. 
+ + +{% include image.html +lightbox="true" +file="/images/examples/terraform/terraform-pipeline.png" +url="/images/examples/terraform/terraform-pipeline.png" +alt="Running Terraform inside Codefresh" +caption="Running Terraform inside Codefresh" +max-width="80%" +%} + +## The example Terraform project + +You can see the example project at [https://github.com/codefresh-contrib/terraform-sample-app](https://github.com/codefresh-contrib/terraform-sample-app){:target="\_blank"}. The repository contains a simple Terraform definition that creates a VM on Google cloud. + +You can play with it locally after installing the `terraform` executable. + +## Prerequisites + +You need to [create a Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) and a Google account first. Then you need to create a [Service account Key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys){:target="\_blank"} which will allow terraform to communicate with Google cloud. + + +Add your service account json as a pipeline variable called `ACCOUNT_JSON_CONTENT`. The content of this variable will be used +in order to authenticate to Google cloud. + +## Create a CI/CD pipeline for Terraform + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - prepare + - deploy +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'codefresh-contrib/terraform-sample-app' + revision: master + git: github + SetupAuth: + image: alpine:3.9 + title: Setting up Google cloud auth + stage: prepare + commands: + - echo $ACCOUNT_JSON_CONTENT > /codefresh/volume/account.json + - cf_export GOOGLE_CLOUD_KEYFILE_JSON=/codefresh/volume/account.json + DeployWithTerraform: + image: hashicorp/terraform:0.12.0 + title: Deploying Terraform plan + stage: deploy + commands: + - terraform init + - terraform apply -auto-approve + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Creates a pipeline variable with the path of the Google service account by running [cf_export]({{site.baseurl}}/docs/pipelines/variables/#exporting-environment-variables-from-a-freestyle-step). +1. Creates the VM on Google cloud by running `terraform init/apply`. + +>For simplicity, we auto-approve the Terraform plan in the example pipeline. In a production pipeline, you would instead use an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to inspect the plan before actually applying it. + +The pipeline needs a [single environment variable]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) that holds the content of the service account. + + +{% include image.html +lightbox="true" +file="/images/examples/terraform/google_cloud_json.png" +url="/images/examples/terraform/google_cloud_json.png" +alt="Passing the Google account in the pipeline parameters" +caption="Passing the Google account in the pipeline parameters" +max-width="60%" +%} + + +Run the pipeline and see your deployment succeed. + + +Note that in a production pipeline you should also handle the [Terraform state](https://www.terraform.io/docs/state/){:target="\_blank"} in a proper manner. The example provided is using a file for [state storage](https://www.terraform.io/docs/backends/index.html){:target="\_blank"} which is not appropriate when using Terraform in a team environment. 
Instead you should use one of the [storage backends](https://www.terraform.io/docs/backends/types/index.html){:target="\_blank"} that support High Availability and Locking. + + + + +## Handling Pull requests + +You can easily use the same pipeline or a different one for pull requests. In this case replace the `terraform apply` command with `terraform plan`. Even better, you can add an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to allow humans to inspect the pipeline first. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/cd-examples/transferring-php-ftp.md b/_docs/example-catalog/cd-examples/transferring-php-ftp.md new file mode 100644 index 000000000..56aa1d270 --- /dev/null +++ b/_docs/example-catalog/cd-examples/transferring-php-ftp.md @@ -0,0 +1,118 @@ +--- +title: "Deploy to VM via FTP" +description: "Deploying a PHP application to a VM using FTP" +group: example-catalog +sub_group: cd-examples +toc: true +redirect_from: + - /docs//learn-by-example/java/spring-mvc-jdbc-template/ +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-management/create-a-codefresh-account/){:target="\_blank"} +- A remote machine with an FTP server and SSH setup (ensure that your FTP directory, I.e., `/srv/ftp/pub` has the proper write permissions for the FTP user). + +>Note that as you may already know, FTP is extremely insecure as it relies on plain-text passwords and usernames, making data very vulnerable to sniffing. A more secure solution would be to use SFTP or SCP. + +## Example PHP project + +The example project can be found on [GitHub](https://github.com/codefresh-contrib/ftp-php-app){:target="\_blank"}. The application is a simple PHP application that displays an example timer. + +{% include image.html +lightbox="true" +file="/images/examples/php-file-transfer/test-environment.png" +url="/images/examples/php-file-transfer/test-environment.png" +alt="Example PHP Application" +caption="Example PHP Application" +max-width="90%" +%} + +## Create the pipeline + +Our pipeline includes four stages: + +- A stage for cloning +- A stage for packaging +- A stage for transferring files + +{% include image.html +lightbox="true" +file="/images/examples/php-file-transfer/pipeline.png" +url="/images/examples/php-file-transfer/pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +Here is the entire pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +# More examples of Codefresh YAML can be found at +# https://codefresh.io/docs/docs/example-catalog/ + +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "install" + - "transfer" +steps: + clone: + title: "Cloning main repository..." + type: "git-clone" + arguments: + repo: "codefresh-contrib/ftp-php-app" + git: "github" + stage: "clone" + install_dependencies: + title: "Collecting Php dependencies..." 
+ type: "freestyle" + working_directory: "./ftp-php-app" + arguments: + image: "composer:1.9.3" + commands: + - "composer install --ignore-platform-reqs --no-interaction --no-plugins --no-scripts --prefer-dist" + stage: "install" + steps: + ftp_transfer: + title: "Transferring application to VM via ftp..." + type: "freestyle" + working_directory: "./ftp-php-app" + arguments: + image: "dockito/lftp-client:latest" + environment: + - USER= + - PASSWORD= + - HOST= + - PUB_FTP_DIR= + commands: + - lftp -e "set ftp:use-site-utime2 false; mirror -x ^\.git/$ -X flat-logo.png -p -R ftp-php-ap $PUB_FTP_DIR/ftp-php-app; exit" -u $USER,$PASSWORD $HOST + stage: "transfer" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the main repository through a [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Installs the necessary PHP dependencies for our application through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Transfers our application via FTP through another freestyle step. Note that you will need to change the environment variables to your respective values, either in the YAML itself (above), or through the pipeline settings: + +{% include image.html +lightbox="true" +file="/images/examples/php-file-transfer/variables.png" +url="/images/examples/php-file-transfer/variables.png" +alt="Codefresh Environment Variables" +caption="Codefresh Environment Variables" +max-width="90%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + diff --git a/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md b/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md new file mode 100644 index 000000000..c15084dd7 --- /dev/null +++ b/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md @@ -0,0 +1,135 @@ +--- +title: "Trigger a Kubernetes Deployment from a Docker Hub Push Event" +description: "Learn how to trigger a Kubernetes deployment when an image is updated" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +In this example, we will cover how to trigger a Kubernetes deployment from a Dockerhub Push event using a Dockerhub [registry trigger]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/#create-a-new-dockerhub-trigger). + +Our example has two pipelines: one for packaging code (CI), and the second for deploying code (CD). + +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +- A DockerHub registry [connected to your Codefresh account]({{site.baseurl}}/docs/integrations/docker-registries/#docker-hub) +- A Kubernetes cluster [connected to your Codefresh account]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) +- A service for your application [deployed to your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#viewing-your-kubernetes-services) + +## Example Project + +You can see the example project on [GitHub](https://github.com/codefresh-contrib/registry-trigger-sample-app/tree/master){:target=\_blank"}. The repository contains a simple Hello World NodeJs app as well as 2 pipelines. 
+    type: "freestyle"
+    working_directory: "./ftp-php-app"
+    arguments:
+      image: "composer:1.9.3"
+      commands:
+        - "composer install --ignore-platform-reqs --no-interaction --no-plugins --no-scripts --prefer-dist"
+    stage: "install"
+  ftp_transfer:
+    title: "Transferring application to VM via ftp..."
+    type: "freestyle"
+    working_directory: "./ftp-php-app"
+    arguments:
+      image: "dockito/lftp-client:latest"
+      environment:
+        - USER=
+        - PASSWORD=
+        - HOST=
+        - PUB_FTP_DIR=
+      commands:
+        - lftp -e "set ftp:use-site-utime2 false; mirror -x ^\.git/$ -X flat-logo.png -p -R ftp-php-ap $PUB_FTP_DIR/ftp-php-app; exit" -u $USER,$PASSWORD $HOST
+    stage: "transfer"
+{% endraw %}
+{% endhighlight %}
+
+This pipeline does the following:
+
+1. Clones the main repository through a [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/).
+2. Installs the necessary PHP dependencies for our application through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/).
+3. Transfers our application via FTP through another freestyle step. Note that you will need to change the environment variables to your respective values, either in the YAML itself (above), or through the pipeline settings:
+
+{% include image.html
+lightbox="true"
+file="/images/examples/php-file-transfer/variables.png"
+url="/images/examples/php-file-transfer/variables.png"
+alt="Codefresh Environment Variables"
+caption="Codefresh Environment Variables"
+max-width="90%"
+%}
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples)
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/)
+
+
diff --git a/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md b/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md
new file mode 100644
index 000000000..c15084dd7
--- /dev/null
+++ b/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md
@@ -0,0 +1,135 @@
+---
+title: "Trigger a Kubernetes Deployment from a Docker Hub Push Event"
+description: "Learn how to trigger a Kubernetes deployment when an image is updated"
+group: example-catalog
+sub_group: cd-examples
+toc: true
+---
+
+In this example, we will cover how to trigger a Kubernetes deployment from a Docker Hub push event using a Docker Hub [registry trigger]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/#create-a-new-dockerhub-trigger).
+
+Our example has two pipelines: one for packaging code (CI), and a second one for deploying code (CD).
+
+## Prerequisites
+
+- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/)
+- A Docker Hub registry [connected to your Codefresh account]({{site.baseurl}}/docs/integrations/docker-registries/#docker-hub)
+- A Kubernetes cluster [connected to your Codefresh account]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster)
+- A service for your application [deployed to your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#viewing-your-kubernetes-services)
+
+## Example Project
+
+You can see the example project on [GitHub](https://github.com/codefresh-contrib/registry-trigger-sample-app/tree/master){:target="\_blank"}. The repository contains a simple Hello World Node.js app as well as two pipelines.
+ +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/) diff --git a/_docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step.md b/_docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step.md new file mode 100644 index 000000000..b228a895f --- /dev/null +++ b/_docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step.md @@ -0,0 +1,42 @@ +--- +title: "Use kubectl as part of freestyle step" +description: "How to run manually kubectl in a Codefresh pipeline" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/use-kubectl-as-part-of-freestyle-step/ +toc: true +--- + + +Running Kubernetes commands in Codefresh as part of the workflow is very easy. + + +Codefresh is adding all your clusters into the workflow ready to be used as part of your CI/CD pipeline. +The context remains the same as it appears in the [Codefresh Kubernetes dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/). + +>If your cluster name includes spaces then make sure that you use quotes in the `kubectl` command. + +* Use image: `codefresh/kubectl` +* Add your commands: + * `kubectl config get-contexts`. Will print the cluster that we added to the workflow + * `kubectl config use-context "my-cluster-name"`. The name is the same as in `Account settings` → `Integrations` → `Kubernetes` + * `kubectl get po -owide` + * `kubectl get nodes` + + +## Follow the example + +* Add this [Git repo](https://github.com/Codefresh-Examples/kubectl-in-freestyle-step){:target="_blank"} to your account +* Change the pipeline configuration to use `codefresh.yml`. +* Build. + +## Running parallel steps with kubectl + +More complex examples can be found in the [custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/) documentation page. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/web-terminal.md b/_docs/example-catalog/cd-examples/web-terminal.md new file mode 100644 index 000000000..371515283 --- /dev/null +++ b/_docs/example-catalog/cd-examples/web-terminal.md @@ -0,0 +1,48 @@ +--- +title: "Web terminal" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/web-terminal/ + - /docs/on-demand-test-environment/example-compositions/web-terminal/ +toc: true +--- +This example shows you how to access containers running in a Codefresh standup environment. + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. 
+Here are the contents of this file: + + `Composition.yml` +{% highlight yaml %} +version: '3' +services: + my-service: + image: 'containers101/whomi:master' + volumes: + - my-service:/app + ports: + - '1337' + terminal: + image: 'containers101/cfterminal:master' + ports: + - '8000' + volumes_from: + - my-service +volumes: + my-service: + driver: local +{% endhighlight %} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/cf-example-web-termial){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/android.md b/_docs/example-catalog/ci-examples/android.md new file mode 100644 index 000000000..a02d66b17 --- /dev/null +++ b/_docs/example-catalog/ci-examples/android.md @@ -0,0 +1,80 @@ +--- +title: "Compile and package an Android application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Android applications use Java/Gradle for their build system. Because Codefresh already supports [Gradle]({{site.baseurl}}/docs/example-catalog/ci-examples/gradle/), it is also very easy to build Android projects. + +Any Gradle command can run inside a Docker image that contains the Android SDK. As an example, we will use a [Nextcloud](https://hub.docker.com/r/nextcloudci/android){:target="\_blank"} image from Dockerhub. + + +## The example project + +You can see the example project at [https://github.com/codefresh-contrib/android-sample-app](https://github.com/codefresh-contrib/android-sample-app){:target="\_blank"}. The repository contains a Hello World Android project with the following tasks: + +* `./gradlew test` runs unit tests +* `./gradlew build` builds the application + + +## Create a CI pipeline that compiles/releases Android + +In most cases you would create a similar pipeline to a Gradle project. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/mobile/android-ci-pipeline.png" +url="/images/learn-by-example/mobile/android-ci-pipeline.png" +alt="Building and Testing an Android app" +caption="Building and Testing an Android app" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/android-sample-app/blob/master/codefresh.yml){:target="\_blank"} that uses a Docker image with the Android SDK in order to run Gradle. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... 
+    stage: prepare
+    type: git-clone
+    repo: 'codefresh-contrib/android-sample-app'
+    revision: master
+    git: github
+  TestIt:
+    title: Running Tests
+    stage: test
+    image: nextcloudci/android:android-48
+    commands:
+      - chmod +x ./gradlew
+      - ./gradlew test --no-daemon --gradle-user-home=/codefresh/volume/.gradle
+  BuildIt:
+    title: Packaging Android App
+    stage: build
+    image: nextcloudci/android:android-48
+    commands:
+      - ./gradlew build --no-daemon --gradle-user-home=/codefresh/volume/.gradle
+{% endraw %}
+{% endhighlight %}
+
+This pipeline clones the source code, runs unit tests, and finally builds the Android application.
+
+Codefresh [automatically caches]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#how-caching-works-in-codefresh) the workspace of a build (`/codefresh/volume`) for us. This works great for build tools that keep their cache in the project folder, but not for Maven/Gradle, which keep their cache externally. By changing the location of the Gradle cache, we make sure that Codefresh automatically caches the Gradle libraries as well, resulting in much faster builds.
+
+
+
+## Related articles
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/)
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/)
+
diff --git a/_docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository.md b/_docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository.md
new file mode 100644
index 000000000..d81e5363a
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository.md
@@ -0,0 +1,94 @@
+---
+title: "Build an Image from a different Git repository"
+description: "Build microservices from other repositories"
+group: example-catalog
+sub_group: ci-examples
+redirect_from:
+  - /docs/build-an-image-from-a-different-git-repository/
+toc: true
+---
+
+In most cases, your Codefresh pipeline checks out a single Git repository. Codefresh also has great support for [monorepos]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#using-the-modified-files-field-to-constrain-triggers-to-specific-folderfiles) if you have placed all your applications in a single repository.
+
+A Codefresh pipeline is not tied to a specific Git repository, which means that by [checking out multiple Git repositories]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/#cloning-multiple-repositories) you can build Docker images from other, unrelated repositories in a single pipeline.
+
+## Building Docker images from other Git repositories
+
+
+Here is a Codefresh pipeline that checks out two microservices from two different Git repositories.
+
+{% include image.html
+lightbox="true"
+file="/images/examples/docker-build/build-from-other-git-repo.png"
+url="/images/examples/docker-build/build-from-other-git-repo.png"
+alt="Checkout and build docker images"
+caption="Checkout and build docker images"
+max-width="100%"
+%}
+
+And here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/):
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+stages:
+  - 'clone phase'
+  - 'build phase'
+steps:
+  checkoutApp1:
+    title: 'Cloning first repository...'
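+    # Each of the two git-clone steps checks out into its own folder named after
+    # the repository, so the two microservices can be built side by side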
+ type: git-clone + repo: kostis-codefresh/example_nodejs_postgres + revision: experiment1 + git: github + stage: 'clone phase' + checkoutApp2: + title: 'Cloning second repository...' + type: git-clone + repo: kostis-codefresh/trivial-go-web + revision: master + git: github + stage: 'clone phase' + myFirstDockerImage: + title: 'Building Microservice A' + type: build + dockerfile: Dockerfile + image_name: my-nodejs-image + tag: from-develop-branch + working_directory: './example_nodejs_postgres' + stage: 'build phase' + mySecondDockerImage: + title: 'Building Microservice B' + type: build + dockerfile: Dockerfile + working_directory: './trivial-go-web' + image_name: my-app-image + tag: from-master-branch + stage: 'build phase' +{% endraw %} +{% endhighlight %} + +The pipeline first checks out two different Git repositories, which themselves contain Dockerfiles. Then it creates a Docker image for each one using the respective Dockerfile. + +You can see both images in the [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/#viewing-docker-images) . + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/two-docker-images.png" +url="/images/examples/docker-build/two-docker-images.png" +alt="Docker images from other Git repos" +caption="Docker images from other Git repos" +max-width="100%" +%} + + +Notice that there are no explicit push steps in the pipeline, as all successful Codefresh pipelines automatically push to the private Docker registry. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +[Build step in pipelines in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build and Push an image]({{site.baseurl}}/docs/pipelines/examples/build-and-push-an-image/) +[Parallel pipelines]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location.md b/_docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location.md new file mode 100644 index 000000000..75d5b67f5 --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location.md @@ -0,0 +1,74 @@ +--- +title: "Build an Image by specifying a Dockerfile location" +description: "How to choose a Dockerfile to build with Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/build-an-image-specify-dockerfile-location/ +toc: true +--- + +You may have a project where the Dockerfile is **not** in the root folder of the project. Maybe the repository has multiple projects inside, each with its own Dockerfile, or you simply want to use a different folder for the Docker context. + +>The source code of the repository is at [https://github.com/codefreshdemo/cf-example-dockerfile-other-location](https://github.com/codefreshdemo/cf-example-dockerfile-other-location){:target="\_blank"}. Feel free to fork it if you want to follow along. + +If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/). + + +## Building a Dockerfile from a different folder + +By default, if you run a single command like the one below, Docker uses the Dockerfile of the current folder: + +``` +docker build . -t my-web-app +``` + +If your Dockerfile is in a different folder, specify it explicitly with: + +``` +docker build . 
-t my-web-app -f subfolder/Dockerfile
+```
+
+Codefresh supports a similar syntax as well. The `dockerfile` property of the [build step]({{site.baseurl}}/docs/pipelines/steps/build/) can accept a full path.
+
+Here is the full pipeline:
+
+ `codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+steps:
+  main_clone:
+    title: Cloning main repository...
+    type: git-clone
+    repo: 'codefreshdemo/cf-example-dockerfile-other-location'
+    revision: 'master'
+    git: github
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    image_name: my-app
+    working_directory: '.'
+    tag: 'master'
+    dockerfile: docker/Dockerfile
+{% endhighlight %}
+
+This pipeline checks out the source code of the repository and then builds a Dockerfile found in the `docker` subfolder, while still keeping the root directory as the Docker context.
+
+{% include image.html
+lightbox="true"
+file="/images/examples/docker-build/build-spefify-dockerfile.png"
+url="/images/examples/docker-build/build-spefify-dockerfile.png"
+alt="Building a Docker image with specific Dockerfile"
+caption="Building a Docker image with specific Dockerfile"
+max-width="100%"
+%}
+
+You could also change the Docker build context by editing the `working_directory` property. By default, it looks at the root folder of the project, but any subfolder path is also valid.
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples)
+[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/)
+[Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/)
+[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository)
+[Build and push an Image]({{site.baseurl}}/docs/yaml-examples/example-catalog/ci-examples/build-and-push-an-image)
+[Build an Image with build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments)
\ No newline at end of file
diff --git a/_docs/example-catalog/ci-examples/build-an-image-with-build-arguments.md b/_docs/example-catalog/ci-examples/build-an-image-with-build-arguments.md
new file mode 100644
index 000000000..a7a623566
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/build-an-image-with-build-arguments.md
@@ -0,0 +1,133 @@
+---
+title: "Build an Image with build arguments"
+description: "Use Docker build arguments in Codefresh pipelines"
+group: example-catalog
+sub_group: ci-examples
+redirect_from:
+  - /docs/build-an-image-with-build-arguments/
+toc: true
+---
+
+Building a Docker image that requires build arguments is very easy with Codefresh pipelines.
+
+The source code of the repository is at [https://github.com/codefreshdemo/cf-example-build-arguments](https://github.com/codefreshdemo/cf-example-build-arguments){:target="\_blank"}. Feel free to fork it if you want to follow along.
+
+If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/).
+
+## Using Docker build arguments
+
+The example application is a very simple NodeJS application with the following Dockerfile:
+
+`Dockerfile`
+{% highlight docker %}
+{% raw %}
+ARG NODE_VERSION
+FROM node:$NODE_VERSION
+
+ARG APP_DIR
+
+RUN mkdir -p $APP_DIR
+
+WORKDIR $APP_DIR
+
+COPY package.json .
+RUN npm install --silent
+COPY . .
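+# The Node application listens on port 3000; it is exposed here and set as the default PORT below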
+EXPOSE 3000 + +ENV PORT 3000 + +CMD [ "npm", "start" ] +{% endraw %} +{% endhighlight %} + +This Dockerfile expects two [build arguments](https://docs.docker.com/engine/reference/builder/#/arg){:target="\_blank"}: + +* `NODE_VERSION` is the version of Node image to use as base +* `APP_DIR` is the source directory to be used inside the container + +## Building a Dockerfile passing values for build arguments + +When you build an image locally on your workstation, you can define build arguments with the `--build-arg` syntax: + +``` +docker build . -t my-node-app --build-arg NODE_VERSION=8 --build-arg APP_DIR=/usr/src/app +``` + +You can get the same result within a Codefresh pipeline: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-example-build-arguments' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + image_name: my-app + working_directory: '.' + tag: 'master' + dockerfile: Dockerfile + build_arguments: + - NODE_VERSION=8 + - APP_DIR=/usr/src/app +{% endraw %} +{% endhighlight %} + +This pipeline checks out the source code of the repository and then builds the Dockerfile by passing the values `8` and `/usr/src/app` to the two arguments. + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/docker-build-arguments.png" +url="/images/examples/docker-build/docker-build-arguments.png" +alt="Using Docker build arguments in a pipeline" +caption="Using Docker build arguments in a pipeline" +max-width="100%" +%} + +## Using Codefresh variables as build arguments + +In the previous pipeline, the Docker build arguments are defined in the pipeline itself, but you can also use [pipeline variables]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines), [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/), or any other standard mechanism you already have in place. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-example-build-arguments' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + image_name: my-app + working_directory: '.' + tag: 'master' + dockerfile: Dockerfile + build_arguments: + - NODE_VERSION=${{NODE_VERSION_FROM_SHARED_CONFIG}} + - APP_DIR=${{APP_DIR_PIPELINE_VARIABLE}} +{% endraw %} +{% endhighlight %} + +In this case, you can also use any of the built-in [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/). 
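+
+As a small illustration, the fragment below tags the image with the built-in `CF_BRANCH_TAG_NORMALIZED` variable and also passes `CF_SHORT_REVISION` as an extra build argument. Treat it as a hypothetical sketch: the `GIT_HASH` argument is not part of the example project, so you would need to add a matching `ARG GIT_HASH` to the Dockerfile yourself.
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    image_name: my-app
+    tag: '${{CF_BRANCH_TAG_NORMALIZED}}'
+    dockerfile: Dockerfile
+    build_arguments:
+      - NODE_VERSION=8
+      - APP_DIR=/usr/src/app
+      - GIT_HASH=${{CF_SHORT_REVISION}} # hypothetical extra argument; requires ARG GIT_HASH in the Dockerfile
+{% endraw %}
+{% endhighlight %}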
+ + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/) +[Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +[Build and push an Image]({{site.baseurl}}/docs/yaml-examples/example-catalog/ci-examples/build-and-push-an-image) diff --git a/_docs/example-catalog/ci-examples/build-an-image-with-the-dockerfile-in-root-directory.md b/_docs/example-catalog/ci-examples/build-an-image-with-the-dockerfile-in-root-directory.md new file mode 100644 index 000000000..a9c5cb2e2 --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-an-image-with-the-dockerfile-in-root-directory.md @@ -0,0 +1,67 @@ +--- +title: "Build an Image with the Dockerfile in root directory" +description: "Get started quickly with building Docker images" +group: example-catalog +sub_group: ci-examples +toc: true +--- +Building a Docker image is one of the basic operations in Codefresh pipelines. + +>The source code of the repository is at [https://github.com/codefreshdemo/cf-yml-example-build-dockerfile-inroot](https://github.com/codefreshdemo/cf-yml-example-build-dockerfile-inroot){:target="\_blank"}. Feel free to fork it if you want to follow along. + +If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/). + + +## Building a Dockerfile from the root folder + +By default, if you run a single command like the one below, Docker uses the Dockerfile of the current folder: + +``` +docker build . -t my-web-app +``` + +You can get the same result within a Codefresh pipeline: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-yml-example-build-dockerfile-inroot' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + image_name: my-app + working_directory: '${{main_clone}}' + tag: 'master' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This pipeline checks out the source code of the repository and then builds a dockerfile found at the root folder of the project. + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/build-dockerfile-root.png" +url="/images/examples/docker-build/build-dockerfile-root.png" +alt="Building a Docker image with a default Dockerfile" +caption="Building a Docker image with a default Dockerfile" +max-width="100%" +%} + +You can also change the Docker build context by editing the `working_directory` property. By default, it looks at the root folder of the project, but any subfolder path is also valid. 
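+
+For instance, if the Dockerfile and sources lived in a hypothetical `service-a` subfolder of the repository, a sketch of the same build step with a narrower context could look like this (values are illustrative only):
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    image_name: my-app
+    working_directory: './service-a' # hypothetical subfolder used as the Docker build context
+    tag: 'master'
+    dockerfile: Dockerfile
+{% endraw %}
+{% endhighlight %}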
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +[Build and push an Image]({{site.baseurl}}/docs/yaml-examples/example-catalog/ci-examples/build-and-push-an-image) +[Build an Image With build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments) diff --git a/_docs/example-catalog/ci-examples/build-and-push-an-image.md b/_docs/example-catalog/ci-examples/build-and-push-an-image.md new file mode 100644 index 000000000..33ebac637 --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-and-push-an-image.md @@ -0,0 +1,137 @@ +--- +title: "Build and push an Image" +description: "Build Docker images and push them to registries with Codefresh" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/build-and-push-an-image/ + - /docs/docker-registries/push-image-to-a-docker-registry/ +toc: true +--- + +Building a Docker image and then pushing it to a registry is one of the most basic scenarios for creating a pipeline. +In this example we will use a demo Node.js application that will be packaged in a Docker image. + +The source code of the repository is at [https://github.com/codefreshdemo/cf-example-build-and-push](https://github.com/codefreshdemo/cf-example-build-and-push){:target="\_blank"}. Feel free to fork it if you want to follow along. + +If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/). + + +## Building and push Docker image to default registry + +Building a Docker image with Codefresh is easy, and only requires a simple step. In addition, all successful pipelines in Codefresh automatically push to [your default Docker registry]({{site.baseurl}}/docs/docker-registries/#the-default-registry), without additional configuration, if you have one. + +Here is the most basic pipeline that clones a repo and builds an image: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- checkout +- build +steps: + clone: + title: Cloning main repository... + type: git-clone + stage: checkout + repo: 'codefreshdemo/cf-example-build-and-push' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + stage: build + image_name: my-node-js-app + working_directory: {{clone}} + tag: 'master' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +## Building and pushing Docker image to _any registry_. + +You can push your image to any [registry]({{site.baseurl}}/docs/docker-registries/). + +* First you need to connect your external registry in the integrations page. 
Here are the instructions for:
+
+  * [Docker Hub]({{site.baseurl}}/docs/integrations/docker-registries/docker-hub/)
+  * [Google Container Registry]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/)
+  * [Amazon EC2 Container Registry]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/)
+  * [Bintray.io]({{site.baseurl}}/docs/integrations/docker-registries/bintray-io/)
+  * [Quay.io]({{site.baseurl}}/docs/integrations/docker-registries/quay-io/)
+  * [Other Registries]({{site.baseurl}}/docs/integrations/docker-registries/other-registries/)
+
+* Then add a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) in your pipeline and use the registry name of your integration.
+
+Here is the full example:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+stages:
+- checkout
+- build
+- push
+steps:
+  clone:
+    title: Cloning main repository...
+    type: git-clone
+    stage: checkout
+    repo: 'codefreshdemo/cf-example-build-and-push'
+    revision: 'master'
+    git: github
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    stage: build
+    image_name: my-node-js-app
+    working_directory: ${{clone}}
+    tag: 'master'
+    dockerfile: Dockerfile
+  push_to_my_registry:
+    stage: 'push'
+    type: push
+    title: Pushing to a registry
+    candidate: ${{build_my_app}}
+    tag: 'v1.0.0'
+    registry: dockerhub
+    image_name: kkapelon/my-node-js-app
+{% endraw %}
+{% endhighlight %}
+
+Here we use a specific tag, `v1.0.0`, but Codefresh has several variables that you can use to tag images. Common examples are `CF_BRANCH_TAG_NORMALIZED`, `CF_SHORT_REVISION` or `CF_BUILD_ID`. Read more on [variables]({{site.baseurl}}/docs/pipelines/variables/).
+
+{% include image.html
+ lightbox="true"
+ file="/images/examples/docker-build/build-and-push-pipeline.png"
+ url="/images/examples/docker-build/build-and-push-pipeline.png"
+ alt="Pushing image to external registry"
+ caption="Pushing image to external registry"
+ max-width="100%"
+ %}
+
+
+If you run the pipeline, the Docker image is pushed *both* to the private Docker registry (by the build step) *and* to the external Docker registry (by the push step).
+
+
+## More options for pushing images
+
+Codefresh has several options when it comes to pushing images:
+
+* You can specify multiple tags to be pushed
+* You can use ECR registries directly
+* You can embed credentials in the push steps
+
+Read more in [push steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/push/).
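+
+For example, here is a sketch of the same push step publishing several tags at once, using a `tags` array instead of the single `tag` field (illustrative only — check the [push step]({{site.baseurl}}/docs/pipelines/steps/push/) reference for the exact fields):
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  push_to_my_registry:
+    stage: 'push'
+    type: push
+    title: Pushing to a registry
+    candidate: ${{build_my_app}}
+    tags: # multiple tags pushed for the same image
+      - 'v1.0.0'
+      - '${{CF_SHORT_REVISION}}'
+      - 'latest'
+    registry: dockerhub
+    image_name: kkapelon/my-node-js-app
+{% endraw %}
+{% endhighlight %}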
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/) +[Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +[Build an Image With Build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments) diff --git a/_docs/example-catalog/ci-examples/c-make.md b/_docs/example-catalog/ci-examples/c-make.md new file mode 100644 index 000000000..06b95d76d --- /dev/null +++ b/_docs/example-catalog/ci-examples/c-make.md @@ -0,0 +1,74 @@ +--- +title: "Compile and test a C application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any C/C++ application very easily as both `gcc` and `g++` are already offered in Dockerhub. There is also another example available with [C++ and cmake]({{site.baseurl}}/docs/example-catalog/ci-examples/cpp-cmake). + +## The example C project + +You can see the example project at [https://github.com/codefresh-contrib/c-sample-app](https://github.com/codefresh-contrib/c-sample-app){:target="\_blank"}. The repository contains a C starter project with a `Makefile` and several targets: + +* `make` compiles the code. +* `make test` runs unit tests +* `make clean` removes artifacts and binaries. + +There are also extra targets for `tags` and `etags`. + +## Create a CI pipeline for C applications + +Creating a CI/CD pipeline for C is very easy, because Codefresh can run any [gcc image](https://hub.docker.com/_/gcc/){:target="\_blank"} that you wish. Gcc docker images already contain the `make` utility. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/cc/c-make-pipeline.png" +url="/images/learn-by-example/cc/c-make-pipeline.png" +alt="Compiling a C application in a pipeline" +caption="Compiling a C application in a pipeline" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/c-sample-app/blob/master/codefresh.yml){:target="\_blank"} that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - build +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'codefresh-contrib/c-sample-app' + revision: master + git: github + compile_my_sources: + title: Compile + stage: build + image: gcc + commands: + - make + run_my_tests: + title: Test + stage: build + image: gcc + commands: + - make test +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, compiles the code and runs unit tests. In all cases we use the public Docker image of Gcc that also contains `make`. 
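+
+Because Codefresh [caches the build workspace]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#how-caching-works-in-codefresh) (`/codefresh/volume`) between builds, you may sometimes want to start from a clean slate. Here is a sketch of an optional extra step that runs the `make clean` target before compiling (not part of the example repository):
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  clean_previous_artifacts:
+    title: Clean
+    stage: build
+    image: gcc
+    commands:
+      - make clean # removes artifacts and binaries left over from a previous build
+{% endraw %}
+{% endhighlight %}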
+ + +## Related articles +[C++ example]({{site.baseurl}}/docs/example-catalog/ci-examples/cpp-cmake/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/call-child-pipelines.md b/_docs/example-catalog/ci-examples/call-child-pipelines.md new file mode 100644 index 000000000..fd83b5b7c --- /dev/null +++ b/_docs/example-catalog/ci-examples/call-child-pipelines.md @@ -0,0 +1,108 @@ +--- +title: "Call a CD pipeline from a CI pipeline" +description: "How to call child pipelines from a parent pipeline" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +In Codefresh you can easily create nested pipelines by calling other pipelines from within an existing pipeline. The [codefresh-run plugin](https://codefresh.io/steps/step/codefresh-run){:target="\_blank"} allows you to launch another pipeline, and optionally wait for its completion. + +{% include image.html +lightbox="true" +file="/images/examples/nested-pipelines/call-other-pipeline.png" +url="/images/examples/nested-pipelines/call-other-pipeline.png" +alt="Parent and child pipelines" +caption="Parent and child pipelines" +max-width="80%" +%} + +A very common pattern in Codefresh is to have a parent pipeline responsible for Continuous Integration (packaging code), that calls a child pipeline for Continuous Delivery (taking care of deployment). + +## Example project + +You can see the example project at [https://github.com/codefresh-contrib/call-child-pipeline-sample-app](https://github.com/codefresh-contrib/call-child-pipeline-sample-app){:target="\_blank"}. The repository contains a NodeJs app as well as three - one parent and two child pipelines. + +## Create a pipeline that calls other pipelines + +Here is the definition of the parent pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - package + - deploy +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: github + stage: prepare + read_my_app_version: + title: Reading Application version + stage: prepare + image: node:latest + commands: + - export PACKAGE_VERSION=$(node -p "require('./package.json').version") + - cf_export PACKAGE_VERSION + build_my_docker_image: + title: 'Building My Docker Image' + stage: package + type: build + dockerfile: Dockerfile + image_name: my-app-image + tag: ${{PACKAGE_VERSION}} + call_qa_pipeline: + title: Deploy to QA + stage: deploy + type: codefresh-run + arguments: + PIPELINE_ID: child-pipelines/qa-pipeline + VARIABLE: + - CF_BRANCH=${{CF_BRANCH}} + - CF_REVISION=${{CF_REVISION}} + - APP_VERSION=${{PACKAGE_VERSION}} + when: + branch: + only: + - develop + call_prod_pipeline: + title: Deploy to Prod + stage: deploy + type: codefresh-run + arguments: + PIPELINE_ID: child-pipelines/prod-pipeline + VARIABLE: + - CF_BRANCH=${{CF_BRANCH}} + - CF_REVISION=${{CF_REVISION}} + - APP_VERSION=${{PACKAGE_VERSION}} + when: + branch: + only: + - /^release.*/i + + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. 
Creates a variable that contains the Application version as specified in `package.json` through [cf_export]({{site.baseurl}}/docs/pipelines/variables/#exporting-environment-variables-from-a-freestyle-step). +1. Builds a docker image tagged with the Application version through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Optionally runs the downstream QA pipeline if the branch is named `develop`. It also passes several environment variables to the child pipeline (including the Application version). +1. Optionally runs the downstream Prod pipeline if the branch name starts with `release`. It also passes several environment variables to the child pipeline (including the Application version). + +The last two steps use [conditions]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) to decide if they will run or not. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Pipeline plugins](https://codefresh.io/steps/){:target="\_blank"} \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/cc.md b/_docs/example-catalog/ci-examples/cc.md new file mode 100644 index 000000000..c23c08fcd --- /dev/null +++ b/_docs/example-catalog/ci-examples/cc.md @@ -0,0 +1,10 @@ +--- +title: "C/C++" +description: "How to build C/C++ applications with Codefresh CI/CD pipelines" +group: example-catalog +toc: true +--- +This section contains Codefresh examples based on C and C++. + +- [C Example with make]({{site.baseurl}}/docs/learn-by-example/cc/c-make) +- [C++ Example with cmake]({{site.baseurl}}/docs/learn-by-example/cc/cpp-cmake) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/codacy-testing.md b/_docs/example-catalog/ci-examples/codacy-testing.md new file mode 100644 index 000000000..bd2e437b6 --- /dev/null +++ b/_docs/example-catalog/ci-examples/codacy-testing.md @@ -0,0 +1,174 @@ +--- +title: "Codacy coverage reports" +description: "How to forward coverage reports to Codacy" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Codacy](https://www.codacy.com/){:target="\_blank"} is a code review tool that allows automatic analysis, code coverage tracking, and extensive reports, for you and your team to improve your code quality over time. + +Analysis reports displayed within Codacy dashboard: +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-report.png" +url="/images/testing/codacy/codacy-report.png" +alt="Codacy UI with coverage reports" +max-width="100%" +%} + +## Prerequisites for using Codacy + +* A simple [Codefresh pipeline, up and running]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/) +* A [Codacy account](https://www.codacy.com/){:target="\_blank"} (free, pro or enterprise) +* A testing tool added to your project that produces coverage reports + +Codacy supports over [30 different language integrations](https://docs.codacy.com/getting-started/supported-languages-and-tools/){:target="\_blank"}. Depending on the programming language used, it requires little to no set-up. + +You could try it out by cloning our [node example application](https://github.com/codefresh-contrib/codacy-sample-app){:target="\_blank"} that utilises [jest](https://jestjs.io/){:target="\_blank"}. 
+ +## Create an account with Codacy +Codacy has a free version, a pro version, and an on-premises version. The latter two have a free trial, which allows you to test all features over the course of two weeks. You can sign-up via GitHub, Bitbucket, or GitLab. + +When you log into Codacy for the first time, it will ask you to provide access to a repository. At this stage, Codacy will not download any code from your repository but merely access its names. You can then either provide access to selective repositories or your entire git account. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-add-repo.png" +url="/images/testing/codacy/codacy-add-repo.png" +alt="Add repository to codacy" +max-width="80%" +%} + +## Generate Project API token +To use Codacy, we need a project API token. To generate the token, select your project => go to settings => integrations => add integration => select “Project API”. Make sure that you select the API token from here and not your general project settings. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/create-api-token.png" +url="/images/testing/codacy/create-api-token.png" +alt="Create Project API token" +max-width="80%" +%} + +## Codefresh pipeline + +In case the project that you want to use Codacy in does not have a pipeline, [create a new pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/). + +{% include image.html +lightbox="true" +file="/images/testing/codacy/create-codacy-pipeline.png" +url="/images/testing/codacy/create-codacy-pipeline.png" +alt="Create Codacy Pipeline" +max-width="80%" +%} + +**Setting-up step** + +This step is based on our [TypeScript application](https://github.com/codefresh-contrib/codacy-sample-app){:target="\_blank"}. Before we set up our pipeline, we will add our Project API token as our environment variable. Note that we have specified our token in the variables section on the right, as displayed in the following screenshot. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-variable.png" +url="/images/testing/codacy/codacy-variable.png" +alt="Provide Codacy ENV variable" +max-width="80%" +%} + +Once the variable is called through the [Codefresh yml syntax]({{site.baseurl}}/docs/pipelines/variables/), it automatically uses the value provided within the variables section. If you are using this example as your pipeline, please delete anything in your pipeline. 
We can then add the following pipeline to our Inline YAML within the Workflow section in our UI: + +{% highlight yaml %} +{% raw %} +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "build" + - "test" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "anais-codefresh/codacy-sample-app" + # CF_BRANCH value is auto set when pipeline is triggered + # Learn more at codefresh.io/docs/docs/pipelines/variables/ + revision: "${{CF_BRANCH}}" + git: "github" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "anaisurlichs/codacy-sample-app" + working_directory: "${{clone}}" + tag: "${{CF_BRANCH_TAG_NORMALIZED}}" + dockerfile: "Dockerfile" + stage: "build" + registry: "dockerhub" + + tests: + title: "Running test" + type: "freestyle" + working_directory: '${{clone}}' + arguments: + image: 'node:15.2' + commands: + - "npm install --save-dev jest" + - "npm run test" + stage: "test" + + codacy: + title: "Pushing reports to codacy" + type: "freestyle" + working_directory: '${{clone}}' + arguments: + image: 'alpine:3.8' + commands: + - "export CODACY_PROJECT_TOKEN=${{CODACY_PROJECT_TOKEN}}" + - "wget -qO - https://coverage.codacy.com/get.sh | sh" + stage: "test" +{% endraw %} +{% endhighlight %} + +The last two steps, ’tests’ and ’codacy’, are used to run our tests, create our coverage reports and forward those to Codacy. If you are using your own project and existing pipeline, add those two steps to your pipeline. In case you are using your own application, make sure to adapt the commands within the test step to run the tests of your application. Additionally, ensure that both the ’repo’ and the ’image_name’ point to your integrations. + +Once you run the pipeline, the steps will create the coverage report and forwards it to Codacy. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-pipeline.png" +url="/images/testing/codacy/codacy-pipeline.png" +alt="Pipeline with Codacy step" +max-width="80%" +%} + +## View reports + +You can view the updated coverage reports within Codacy's UI every time you make a commit and/or run the Codefresh pipeline directly. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-report.png" +url="/images/testing/codacy/codacy-report.png" +alt="Codacy UI Analysis Dashboard" +max-width="80%" +%} + +You can access further information on the coverage report by opening the file tab and accessing a specific file from your repository. 
+ +{% include image.html +lightbox="true" +file="/images/testing/codacy/file-analysis.png" +url="/images/testing/codacy/file-analysis.png" +alt="Codacy report details" +max-width="90%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Sonarqube Integration]({{site.baseurl}}/docs/testing/sonarqube-integration/) diff --git a/_docs/example-catalog/ci-examples/codecov-testing.md b/_docs/example-catalog/ci-examples/codecov-testing.md new file mode 100644 index 000000000..82e06f88a --- /dev/null +++ b/_docs/example-catalog/ci-examples/codecov-testing.md @@ -0,0 +1,128 @@ +--- +title: "Codecov coverage reports" +description: "How to forward coverage reports to Codecov" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Codecov account](https://codecov.io/){:target="\_blank"} is a code analysis tool with which users can group, merge, archive, and compare coverage reports. Code coverage describes which lines of code were executed by the test suite and which ones were not. However, this is not to be confused with a testing tool. + +Analysis reports displayed within the Codecov dashboard: +{% include image.html +lightbox="true" +file="/images/testing/codecov/analysis-report.png" +url="/images/testing/codecov/analysis-report.png" +alt="Codecov UI Analysis reports" +max-width="50%" +%} + +## Prerequisites for using Codecov + +* A simple [Codefresh pipeline up and running](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +* A [Codecov account](https://codecov.io/){:target="\_blank"} (free or enterprise) +* A testing tool added to your project that produces coverage reports + +Note that reports should ideally be written in .json, .xml, or txt. To be sure, please double check that your coverage [report format](https://docs.codecov.io/docs/supported-report-formats){:target="\_blank"} is supported. You can find a variety of examples for different programming languages and suggestions for respective testing tools in the [Codecov docs](https://docs.codecov.io/docs/supported-languages){:target="\_blank"}. + +To test Codecov and follow along with the next section, you can clone our [Codecov sample app](https://github.com/codefresh-contrib/codecov-sample-app){:target="\_blank"}. + +## Create a Codecov account + +Once you sign up to Codecov, you can add a new repository. The UI will then provide you with an access token to the repository. While it is recommended that you take note of the token, you will still be able to access it within the **Settings** tap. + +{% include image.html +lightbox="true" +file="/images/testing/codecov/codecov-interface.png" +url="/images/testing/codecov/codecov-interface.png" +alt="Codecov Project Repository UI" +max-width="50%" +%} + +## Codefresh pipeline + +In this case, we divided testing and connecting Codefresh to Codecov into two different steps. If they can be run within the same image, you could also connect them. + +**Testing step** +Runs the command(s) for our testing tool. This will generate the code coverage report upon running the pipeline. Please refer to the Codecov documentation for [supported testing frameworks](https://docs.codecov.io/docs/supported-report-formats){:target="\_blank"}. 
The [README of each example](https://docs.codecov.io/docs/supported-languages){:target="\_blank"} refers to possible frameworks that can be used.
+
+In general, ensure that the framework you use for testing and generating code coverage reports:
+* Produces code coverage reports in a supported file format
+* Is compatible with the programming language that your program is written in
+
+{% highlight yaml %}
+{% raw %}
+  test:
+    title: "Running test"
+    type: "freestyle" # Run any command
+    image: "node:14.19.0" # The image in which command will be executed
+    working_directory: "${{clone}}" # Running command where code cloned
+    commands:
+      - "npm install --save-dev jest"
+      - "npx jest --coverage"
+    stage: "test"
+{% endraw %}
+{% endhighlight %}
+
+**Codecov step**
+
+{% highlight yaml %}
+{% raw %}
+  upload:
+    title: "Running test"
+    type: "freestyle" # Run any command
+    image: "node:14.19.0" # The image in which command will be executed
+    working_directory: "${{clone}}" # Running command where code cloned
+    commands:
+      - "ci_env=`curl -s https://codecov.io/env`"
+      - "npm install codecov -g"
+      - "codecov -t ${{CODECOV_TOKEN}} -f ./coverage/clover.xml"
+    stage: "upload"
+{% endraw %}
+{% endhighlight %}
+
+The commands run inside the Node Docker image:
+* ``ci_env=`curl -s https://codecov.io/env` ``: Sets the CI environment variables so that Codecov knows the build is running in Codefresh
+* `npm install codecov -g`: Installs the Codecov CLI
+* `codecov -t ${{CODECOV_TOKEN}} -f ./coverage/clover.xml`: Uses the Codecov access token provided in the UI when we connected the Git repository, and points to the file that contains our coverage report.
+
+Once you run the pipeline, the steps will create the coverage report and forward it to Codecov.
+
+{% include image.html
+lightbox="true"
+file="/images/testing/codecov/codecov-pipeline.png"
+url="/images/testing/codecov/codecov-pipeline.png"
+alt="Pipeline with codecov step"
+max-width="50%"
+%}
+
+## View reports
+
+You can view the updated coverage reports within the Codecov UI every time you make a commit and/or run the Codefresh pipeline directly.
+
+{% include image.html
+lightbox="true"
+file="/images/testing/codecov/codecov-report.png"
+url="/images/testing/codecov/codecov-report.png"
+alt="Codecov coverage report"
+max-width="50%"
+%}
+
+You can access further information on the coverage report by opening the link to the file displayed in the table.
+ +{% include image.html +lightbox="true" +file="/images/testing/codecov/codecov-report-details.png" +url="/images/testing/codecov/codecov-report-details.png" +alt="Codecov report details" +max-width="50%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Sonarqube Integration]({{site.baseurl}}/docs/testing/sonarqube-integration/) + diff --git a/_docs/example-catalog/ci-examples/coveralls-testing.md b/_docs/example-catalog/ci-examples/coveralls-testing.md new file mode 100644 index 000000000..dd060c20b --- /dev/null +++ b/_docs/example-catalog/ci-examples/coveralls-testing.md @@ -0,0 +1,221 @@ +--- +title: "Coveralls coverage reports" +description: "How to forward coverage reports to Coveralls" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Coveralls](https://coveralls.io/){:target="\_blank"} is a web service that allows users to track the code coverage of their application over time in order to optimize the effectiveness of their unit tests. This section details how coverage reports can be generated and forwarded to Coveralls with every Codefresh build. + +Analysis reports displayed within Coveralls dashboard: +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-sample-app.png" +url="/images/testing/coveralls/coveralls-sample-app.png" +alt="Coveralls UI Analysis reports" +max-width="80%" +%} + +## Prerequisites for using Coveralls + +* A simple [Codefresh pipeline up and running](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +* A [Coveralls account](https://coveralls.io/) (free or enterprise) -- Note that all open-source projects are free on Coveralls +* A testing tool added to your project that produces coverage reports + +Coveralls supports [22 different language integrations](https://docs.coveralls.io/about-coveralls){:target="\_blank"}. Each example provided in the official documentation suggests several coverage report tools that can be used in combination with Coveralls. + +You could try it out by cloning our [node example application](https://github.com/codefresh-contrib/coveralls-sample-app){:target="\_blank"} that utilises [jest](https://jestjs.io/){:target="\_blank"}. + +## Prepare your repository + +If you are using your own application as an example, you have to make a few modifications to the repository. Please have a look at the Coveralls example section for other languages. + +First, install Coveralls in your project: +{% highlight yaml %} +{% raw %} +npm install coveralls --save-dev +{% endraw %} +{% endhighlight %} + +Coveralls requires a [script](https://github.com/nickmerwin/node-coveralls){:target="\_blank"} that takes standard input and sends it to coveralls.io to report your code coverage. Depending on the framework that you are using, you will have to add a different script to your application. + +Any coverage reports can be forwarded that are within a [lcov data format](http://ltp.sourceforge.net/coverage/lcov/geninfo.1.php){:target="\_blank"} (including [mocha's LCOV reporter](https://www.npmjs.com/package/mocha-lcov-reporter){:target="\_blank"}). 
For this, we are going to set-up a “bin” folder, and within the folder a coveralls.js file that contains the following content: + +{% highlight yaml %} +{% raw %} +#!/usr/bin/env node + +'use strict'; + +const { handleInput } = require('..'); + +process.stdin.resume(); +process.stdin.setEncoding('utf8'); + +let input = ''; + +process.stdin.on('data', chunk => { + input += chunk; +}); + +process.stdin.on('end', () => { + handleInput(input, err => { + if (err) { + throw err; + } + }); +}); +{% endraw %} +{% endhighlight %} + +## Create a Coveralls account + +Once you sign-up to Coveralls, you can add a new repository. The UI will then provide you with an access token to the repository. Take note of the token since it will be required in the next sections. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/add-repository.png" +url="/images/testing/coveralls/add-repository.png" +alt="Coveralls repository" +max-width="80%" +%} + +## Codefresh pipeline + + +In case the project that you want to use Coveralls in does not have a pipeline, [create a new pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/). + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/create-coveralls-pipeline.png" +url="/images/testing/coveralls/create-coveralls-pipeline.png" +alt="Create Coveralls Pipeline" +max-width="80%" +%} + +Once you ’create’ the pipeline, a standard codefresh.yml file is generated with three steps: +* The first step will clone your repository; +* The second step will both, build and push your repository to the container registry that you have connected with Codefresh; +* And the third step currently does not do much. +In the next section, we will modify the testing step. + +**Testing step** + +The testing step requires three different environment variables to connect to Coveralls: +* `export COVERALLS_SERVICE_NAME="codefresh"` +* `export COVERALLS_GIT_BRANCH="insert the branch that you will be using with your application"` +* `export COVERALLS_REPO_TOKEN="insert the secret repo token from coveralls.io"` + +{% highlight yaml %} +{% raw %} + test: + title: "Running test" + type: "freestyle" # Run any command + image: "node:15.2" # The image in which command will be executed + working_directory: "${{clone}}" # Running command where code cloned + commands: + - "export COVERALLS_SERVICE_NAME=${{COVERALLS_SERVICE_NAME}}" + - "export COVERALLS_GIT_BRANCH=${{CF_BRANCH}}" + - "export COVERALLS_REPO_TOKEN=${{COVERALLS_REPO_TOKEN}}" + - "npm install --save-dev jest" + - "npm run test" + stage: "test" +{% endraw %} +{% endhighlight %} + +We specify several variables within this step. Those, which start with ’CF’ are [Codefresh-specific steps]({{site.baseurl}}/docs/pipelines/variables/) and the value is automatically provided by Codefresh once you run the pipeline. 
Our entire codefresh.yml will look as such: + +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "test" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "anais-codefresh/coveralls-sample-app" + # CF_BRANCH value is auto set when pipeline is triggered + # Learn more at codefresh.io/docs/docs/pipelines/variables/ + revision: "${{CF_BRANCH}}" + git: "github" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "anaisurlichs/coveralls-sample-app" + working_directory: "${{clone}}" + tag: "${{CF_BRANCH_TAG_NORMALIZED}}" + dockerfile: "Dockerfile" + stage: "build" + registry: "dockerhub" + + test: + title: "Running test" + type: "freestyle" # Run any command + image: "node:15.2" # The image in which command will be executed + working_directory: "${{clone}}" # Running command where code cloned + commands: + - "export COVERALLS_SERVICE_NAME=${{COVERALLS_SERVICE_NAME}}" + - "export COVERALLS_GIT_BRANCH=${{CF_BRANCH}}" + - "export COVERALLS_REPO_TOKEN=${{COVERALLS_REPO_TOKEN}}" + - "npm install --save-dev jest" + - "npm run test" + stage: "test" +{% endraw %} +{% endhighlight %} + +Once you run the pipeline the steps will create the coverage report and forward it to Coveralls. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-pipeline.png" +url="/images/testing/coveralls/coveralls-pipeline.png" +alt="Pipeline with Coveralls step" +max-width="80%" +%} + +## View reports + +You can view the updated coverage reports within Coveralls UI every time you make a commit and/or run the Codefresh pipeline directly. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-sample-app.png" +url="/images/testing/coveralls/coveralls-sample-app.png" +alt="Coveralls UI Analysis reports" +max-width="80%" +%} + +You can access further information on the coverage report by opening the link to the file displayed in the table. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-specific-report.png" +url="/images/testing/coveralls/coveralls-specific-report.png" +alt="Coveralls report details" +max-width="80%" +%} + +And view a the code coverage of a specific file: +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-coverage.png" +url="/images/testing/coveralls/coveralls-coverage.png" +alt="Coveralls report details" +max-width="80%" +%} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Sonarqube Integration]({{site.baseurl}}/docs/testing/sonarqube-integration/) diff --git a/_docs/example-catalog/ci-examples/cpp-cmake.md b/_docs/example-catalog/ci-examples/cpp-cmake.md new file mode 100644 index 000000000..11f8e963a --- /dev/null +++ b/_docs/example-catalog/ci-examples/cpp-cmake.md @@ -0,0 +1,125 @@ +--- +title: "Compile and test a C++ application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any C/C++ application very easily as both `gcc` and `g++` are already offered in Dockerhub. 
There is also another example available with [C and make]({{site.baseurl}}/docs/example-catalog/ci-examples/c-make). + +## The example C++ project + +You can see the example project at [https://github.com/codefresh-contrib/cpp-sample-app](https://github.com/codefresh-contrib/cpp-sample-app){:target="\_blank"}. The repository contains a C++ starter project with a `CMakeLists.txt` file: + +* `cmake .` creates the makefiles. +* `make test` runs unit tests +* `make` compiles the code + +The project is also using the [boost testing libraries](https://www.boost.org/){:target="\_blank"}. + +## Cmake, g++ and Docker + +Creating a CI/CD pipeline for C is very easy, because Codefresh can run any [gcc image](https://hub.docker.com/_/gcc/){:target="\_blank"} that you wish. Gcc docker images already contain the `make` utility but not the the `cmake` one. Therefore we will first create a Dockerfile that has `g++`, cmake and the boost libraries. You can follow the same pattern for other development tools that you use. + + +Here is the Dockerfile: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM gcc:9.2 + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update && apt-get install -y cmake libgtest-dev libboost-test-dev && rm -rf /var/lib/apt/lists/* + +CMD ["cmake"] + +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. Starts from the GCC image +1. Installs cmake and boost +1. Sets cmake as the default command + +## Create a CI pipeline for C++ applications + +We can now use the custom Docker image in order to compile/test the C++ application: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/cc/cpp-cmake-pipeline.png" +url="/images/learn-by-example/cc/cpp-cmake-pipeline.png" +alt="Compiling a C++ application in a pipeline" +caption="Compiling a C++ application in a pipeline" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/cpp-sample-app/blob/master/codefresh.yml){:target="\_blank"} that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - prepare + - build +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'codefresh-contrib/cpp-sample-app' + revision: master + git: github + build_dev_image: + title: Building Dev Image + stage: prepare + type: build + image_name: cmake + working_directory: ./dev/ + tag: 'latest' + dockerfile: Dockerfile + create_makefiles: + title: Create Makefiles + stage: prepare + image: ${{build_dev_image}} + commands: + - cmake . + compile_my_sources: + title: Compile + stage: build + image: ${{build_dev_image}} + commands: + - make + run_my_tests: + title: Test + stage: build + image: ${{build_dev_image}} + commands: + - make test +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. clones the source code +1. Creates a development docker image that has g++, cmake and boost +1. Runs cmake on the source code to create the make files +1. Compiles the source code +1. Runs unit tests + +You can add additional tools in the pipeline by extending the Dockerfile mentioned in the previous section. You can also +change the version of Gcc/g++ by starting from a different public or private Docker image. 
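+
+For example, here is a hypothetical variant of the `build_dev_image` step that selects the compiler version through a build argument. It assumes you first change the top of the Dockerfile to `ARG GCC_VERSION=9.2` followed by `FROM gcc:$GCC_VERSION`; the example repository does not do this out of the box:
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  build_dev_image:
+    title: Building Dev Image
+    stage: prepare
+    type: build
+    image_name: cmake
+    working_directory: ./dev/
+    tag: 'latest'
+    dockerfile: Dockerfile
+    build_arguments:
+      - GCC_VERSION=12 # hypothetical argument; requires a matching ARG in the Dockerfile
+{% endraw %}
+{% endhighlight %}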
+ + +## Related articles +[C example]({{site.baseurl}}/docs/example-catalog/ci-examples/c-make/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/codefresh-yaml/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/decryption-with-mozilla-sops.md b/_docs/example-catalog/ci-examples/decryption-with-mozilla-sops.md new file mode 100644 index 000000000..d091ca245 --- /dev/null +++ b/_docs/example-catalog/ci-examples/decryption-with-mozilla-sops.md @@ -0,0 +1,177 @@ +--- +title: "Decrypt with Mozilla SOPS" +description: "Store secrets in your repository and decrypt them using Mozilla SOPS" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) +- A public and private GnuGP key pair +- A credentials yaml, that is encrypted using Mozilla SOPS, and stored in your repository + +## Example Java application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/mozilla-sops-app){:target="\_blank"}. + +The example application retrieves the system variable "password," from the pipeline and uses it to authenticate to a Redis database, but you are free to use any type of database of your choosing. + +```java + String password = System.getenv("password"); + String host = System.getProperty("server.host"); + + RedisClient redisClient = new RedisClient( + RedisURI.create("redis://" + password + "@" + host + ":6379")); + RedisConnection connection = redisClient.connect(); +``` + +Also in the example application is a simple unit test that ensures we are able to read and write data to the database. 
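+
+Before encryption, the credentials file is just a plain YAML map. A hypothetical example of its clear-text form (the real value is of course never committed):
+
+{% highlight yaml %}
+password: my-redis-password # hypothetical plaintext value before running `sops --encrypt`
+{% endhighlight %}
+
+Because the file's `encrypted_regex` setting targets the `password` key, SOPS encrypts only that value and appends its own metadata block, producing the committed file shown below.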
+ +An encrypted credentials file is stored in the repository (along with a public key): + +`credentials.yaml` +```yaml +password: ENC[AES256_GCM,data:Jsth2tY8GhLgj6Jct27l,iv:3vcKVoD5ms29R5SWHiFhDhSAvvJTRzjn9lA6woroUQ8=,tag:OjkLvcHxE4m5RSCV7ej+FA==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + lastmodified: '2020-03-30T19:12:49Z' + mac: ENC[AES256_GCM,data:jGMTkFhXjgGMdWBpaSWjGZP6fta3UuYjEsnqziNELQZ2cLScT9v+GKg/c8iJYv1Gfiz3aw4ivYYrWzwmZehIbPHaw3/XBv/VRCQhzRWYKaf6pPFUXIS7XALSf9L9VbGOXL/CGPRae3t3HpaOor+knd6iQk2WR3K9kSeib4RBSCE=,iv:WSP8hBwaBv3ymTGltBOaVVC1sT08IG4hwqESlG8rN9w=,tag:3hZvCuql+ASWe/Mm5Bl7xg==,type:str] + pgp: + - created_at: '2020-03-30T19:12:49Z' + enc: | + -----BEGIN PGP MESSAGE----- + hQGMA9TqgBq6RQVRAQv/UouNaHfxkJ5PwXLvda97Fgj/2ew2VXPAlAnLvoGvTsb2 + U4GXcaE7c4mYf7wSKF9k/F0FZTUEnd3CRji/OqjrNyvj5zI/9KGRABCKvzjsx+ZG + JolVnDifHl78Mor1CUPQ4JXasHKbVSlNLMGgDHIsvpeC7f7pIi8YDUDIa3/zXhFK + jcKzz4nlrW1Ph8zukmQk49Xvv6+DFj2NTptOB3U6mh79RCdnyCSRHxA3f0X00Pi5 + g0p5x46S5E04uC2wXrZv8i/gyQbLHxwjmdbLq+P1Peu4/i9eSZZOpx0mc1KJ2mjr + oKRvgnUFz3xuYrSNzjC1vM01UbuSytlwx+S3J7VVLPSZRso1sbgv2+ylUOAHS+gZ + 64uL0j/BZrF4wZI8y8zr0nJ6cZLiiF3LeXhfcuWJJ7+5p1OBEvfO+sWorLahIZTw + pogYPDpz4rGnrJRKBkNsVlYuUG8aNerIfhEBr6n//VJtt7QXTEXraLCTt4a6z/Fl + R6YSeNCKWQlURrTfm4Kv0lwBzMTLUb+Fg3HO8ShhiE9/2dKTSJkRJMVXRDp22Fm1 + vO/wMFUjg6Dkrj1LVqQ9zcXc5QElgc4mF/V7SazacbQ7/g67tVtUrTit9LXgR9A0 + k7wU5iT5oWLJtWwpkA== + =Il2p + -----END PGP MESSAGE----- + fp: C70833A85193F72C2D72CB9DBC109AFC69E0185D + encrypted_regex: password + version: 3.5.0 +``` +You cannot run the application locally, as it needs to run in the pipeline in order to use our environment variables to connect. + +## Create pipeline + +The pipeline contains four stages: + +- A stage for cloning +- A stage for importing the private/public keypair +- A stage for decrypting the credentials file +- A stage for packaging our jar and running unit tests + +{% include image.html +lightbox="true" +file="/images/examples/secrets/mozilla-sops-pipeline.png" +url="/images/examples/secrets/mozilla-sops-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +First, you need to add a pipeline variable, `PRIV_KEY`, for your private key. You can do that in the UI by navigating to the in-line YAML editor and to the right-hand side, you will find the **Variables** tab: + +{% include image.html +lightbox="true" +file="/images/examples/secrets/mozilla-sops-pipeline-vars.png" +url="/images/examples/secrets/mozilla-sops-pipeline-vars.png" +alt="Mozilla SOPS Pipeline Variables" +caption="Pipeline Variables" +max-width="90%" +%} + +You can also add this [directly in the YAML itself]({{site.baseurl}}/docs/how-to-guides/migrating-from-travis-ci/#environment-variables). + +Here is the entire pipeline: + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +# More examples of Codefresh YAML can be found at +# https://codefresh.io/docs/docs/example-catalog/ci-examples/ + +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "import" + - "decrypt" + - "package" + +steps: + clone: + title: "Cloning repository..." + type: "git-clone" + stage: "clone" + arguments: + repo: "codefresh-contrib/mozilla-sops-app" + revision: "master" + + import_keys: + title: "Importing gpg keys..." 
+ type: "freestyle" + stage: "import" + working_directory: '${{clone}}' + arguments: + image: "vladgh/gpg" + commands: + - gpg --import public.key + - echo -e "${{PRIV_KEY}}" > private.key + - gpg --allow-secret-key-import --import private.key + + decrypt_password: + title: "Decrypting password..." + type: "freestyle" + working_directory: "${{clone}}" + stage: "decrypt" + arguments: + image: "mozilla/sops" + commands: + - cp -r /codefresh/volume/.gnupg /root/.gnupg + - cf_export password=$(sops --decrypt --extract '["password"]' credentials.yaml) + + package_jar: + title: "Packaging jar and running unit tests..." + working_directory: ${{clone}} + stage: "package" + arguments: + image: "maven:3.5.2-jdk-8-alpine" + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dserver.host=my-redis-db-host clean package + services: + composition: + my-redis-db-host: + image: 'redis:4-alpine' + command: 'redis-server --requirepass $password' + ports: + - 6379 +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the main repository through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Uses a GPG image and imports the public and private key pair through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Decrypts the credentials file through a different freestyle step. At this step, SOPS looks for the .gnupg directory (where the keyring is stored) under /root. We need to copy it from the [Codefresh Volume]({{site.baseurl}}/docs/pipelines/steps/freestyle/#custom-volumes), as /root is not saved between containers. +4. The last step, `package_jar`, does a few special things to take note of: + - Spins up a [Service Container]({{site.baseurl}}/docs/pipelines/service-containers/) running Redis on port 6379 , and sets the password to the database using our exported environment variable + - Sets `maven.repo.local` to cache Maven dependencies into the local codefresh volume to [speed up builds]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/#caching-the-maven-dependencies) + - Runs unit tests and packages the jar. Note how you can directly refer to the service container's name (`my-redis-db-host`) when we set `server.host` + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Vault secrets in pipelines]({{site.baseurl}}/docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline/) + diff --git a/_docs/example-catalog/ci-examples/django.md b/_docs/example-catalog/ci-examples/django.md new file mode 100644 index 000000000..fcc3e75df --- /dev/null +++ b/_docs/example-catalog/ci-examples/django.md @@ -0,0 +1,174 @@ +--- +title: "Python Django example" +description: "Create Docker images for Python applications" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/django/ + - /docs/python/django/ +toc: true +--- +Codefresh can work with Python projects using any of the popular frameworks. In this page we will see Django. For a Flask example see the [quick start guide]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/). + +## The example Django project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-python-django](https://github.com/codefreshdemo/cf-example-python-django){:target="\_blank"}. 
The repository contains a Django starter project with the following commands: + +* `pip install -r requirements.txt` install dependencies. +* `python -m unittest composeexample.utils` runs unit tests. +* `python manage.py runserver 0.0.0.0:8000` to start the application locally. + + +Once launched the application presents the Django starter page at localhost:8000. + +## Django and Docker + +The easiest way to build a Django application is with a Dockerfile that contains everything. This is very convenient as the Docker image can contain everything you need (i.e. app plus test frameworks) inside a pipeline. + + +Here is the Dockerfile: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM python:3.6-slim + +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 +RUN mkdir /code +WORKDIR /code +RUN pip install --upgrade pip +COPY requirements.txt /code/ + +RUN pip install -r requirements.txt +COPY . /code/ + +EXPOSE 8000 + +CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"] +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. Starts from the Python image +1. Sets some environment variables +1. Copies the dependencies file inside the container +1. Upgrades pip and installs all dependencies +1. Copies the rest of the source code +1. Starts the Django app + +You can build this image locally on your workstation and then launch it to test the application. + +### Create a CI pipeline for Python/Django + +Creating a CI/CD pipeline for Django is very easy if you already have the Dockerfile with all required dependencies. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/python/python-build-test.png" +url="/images/learn-by-example/python/python-build-test.png" +alt="Creating a Docker image for Python" +caption="Creating a Docker image for Python" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - test +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefreshdemo/cf-example-python-django' + revision: master + git: github + build_my_image: + title: Building Docker Image + stage: build + type: build + image_name: my-django-image + working_directory: ./ + tag: master + dockerfile: Dockerfile + test_my_image: + title: Running unit tests + stage: test + image: '${{build_my_image}}' + commands: + - python -m unittest composeexample.utils +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, creates a Docker image and then uses the same image to run unit tests. Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. + + +### Running tests before building the docker image + +Sometimes if you have a complex application you might want to run integration tests (or other Python commands), *before* building the Docker image. This scenario is also supported natively by Codefresh. 
+ + +{% include image.html +lightbox="true" +file="/images/learn-by-example/python/python-test-build.png" +url="/images/learn-by-example/python/python-test-build.png" +alt="Building the image after tests have run" +caption="Building the image after tests have run" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefreshdemo/cf-example-python-django/blob/master/codefresh-build-after-test.yml){:target="\_blank"} builds the docker image after tests have already executed. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefreshdemo/cf-example-python-django' + revision: master + git: github + test_the_code: + title: Run unit tests + stage: test + image: python:3.6-slim + commands: + - pip install -r requirements.txt --cache-dir=/codefresh/volume/pip-cache + - python -m unittest composeexample.utils + build_my_image: + title: Building Docker Image + stage: build + type: build + image_name: my-django-image + working_directory: ./ + tag: full + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Codefresh is smart enough that [caches automatically]({{site.baseurl}}/docs/pipelines/pipeline-caching/) for us the workspace of a build (`/codefresh/volume`). This works great for build tools that keep their cache in the project folder, but not for pip which keeps its cache externally (e.g. `~/.cache/pip`). By changing the location of the Pip cache on the project folder (the `pip-cache` name is arbitrary) we make sure that Codefresh will cache automatically the Pip libraries resulting in much faster builds. + +## Related articles +[Python examples]({{site.baseurl}}/docs/example-catalog/ci-examples/python/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/dotnet.md b/_docs/example-catalog/ci-examples/dotnet.md new file mode 100644 index 000000000..173a95693 --- /dev/null +++ b/_docs/example-catalog/ci-examples/dotnet.md @@ -0,0 +1,115 @@ +--- +title: "C# on .NET Core" +description: "How to build a C# project in Codefresh" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any .NET core application very easily as there are official [Docker images from Microsoft](https://hub.docker.com/_/microsoft-dotnet-core){:target="\_blank"}. + +## The example C# project + +You can see the example project at [https://github.com/dotnet-architecture/eShopOnWeb](https://github.com/dotnet-architecture/eShopOnWeb){:target="\_blank"}. The repository contains a C# Web project with 3 kinds of tests. It has different tags for each version of .NET Core and has + +* a `docker-compose.yml` file for local development +* a `tests` directory with all types of tests +* a Dockerfile at `/src/Web` + +There are also previous releases at [https://github.com/dotnet-architecture/eShopOnWeb/releases](https://github.com/dotnet-architecture/eShopOnWeb/releases){:target="\_blank"}. + +### Create a CI pipeline for C# applications + +Creating a CI/CD pipeline for C# is very easy, because Codefresh can run any SDK image version that you wish. 
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/dotnet/dotnetcore-pipeline.png" +url="/images/learn-by-example/dotnet/dotnetcore-pipeline.png" +alt="Compiling a C# application in a pipeline" +caption="Compiling a C# application in a pipeline" +max-width="80%" +%} + +Here is the full pipeline that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'dotnet-architecture/eShopOnWeb' + revision: 'netcore3.0' + git: github-1 + my_unit_tests: + title: Unit tests + stage: test + image: mcr.microsoft.com/dotnet/core/sdk:3.0 + working_directory: './tests/UnitTests/' + commands: + - dotnet test + my_integration_tests: + title: Integration tests + stage: test + image: mcr.microsoft.com/dotnet/core/sdk:3.0 + working_directory: './tests/IntegrationTests/' + commands: + - dotnet test + my_functional_tests: + title: Fuctional tests + stage: test + image: mcr.microsoft.com/dotnet/core/sdk:3.0 + working_directory: './tests/FunctionalTests/' + commands: + - dotnet test + my_app_docker_image: + title: Building Docker Image + type: build + stage: build + image_name: dotnetcore-eshop + working_directory: ./ + tag: latest + dockerfile: src/Web/Dockerfile +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. clones the source code +1. Uses the official `mcr.microsoft.com/dotnet/core/sdk:3.0` image to run unit/integration/functional tests in 3 different folders +1. Builds the application docker image using the root folder as Docker context but with the Dockerfile located at `./src/Web` + + + + + +## Related articles +[C/C++ examples]({{site.baseurl}}/docs/learn-by-example/cc/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + + + + diff --git a/_docs/example-catalog/ci-examples/fan-in-fan-out.md b/_docs/example-catalog/ci-examples/fan-in-fan-out.md new file mode 100644 index 000000000..a8c2b3d16 --- /dev/null +++ b/_docs/example-catalog/ci-examples/fan-in-fan-out.md @@ -0,0 +1,204 @@ +--- +title: "Fan-out-fan-in pipeline" +description: "Use parallel mode to fan-in and fan-out your step dependencies" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +In pipelines, the concept of fan-in/fan-out is depicted in the diagram below. This pipeline offers parallel sub-flows within the same pipeline. Fan-out refers to spreading a task to multiple destinations in parallel, and fan-in is the opposite, where we spread multiple tasks to the same destination. + +{% include image.html +lightbox="true" +file="/images/examples/unit-tests/parallel-pipeline-examples.png" +url="/images/examples/unit-tests/parallel-pipeline-examples.png" +alt="parallel pipeline diagraam" +caption="Parallel Mode Diagram" +max-width="100%" +%} + +As you can see in the diagram, Step1 fans out to Step2 and Step4 (which run in parallel), while Step3 and Step4 fan-in to Step5. 
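+
+Expressed in Codefresh YAML, this dependency graph uses the pipeline's full parallel mode together with `when`/`steps` conditions on each step. Here is a minimal skeleton of the diagram only, with placeholder images and commands; the complete, working pipeline for the example application follows later in this article:
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+mode: parallel
+steps:
+  Step1:
+    image: alpine
+    commands:
+      - echo "step 1"
+  Step2:
+    image: alpine
+    commands:
+      - echo "step 2"
+    when:
+      steps:
+        - name: Step1
+          on:
+            - success
+  Step4:
+    image: alpine
+    commands:
+      - echo "step 4"
+    when:
+      steps:
+        - name: Step1
+          on:
+            - success
+  Step3:
+    image: alpine
+    commands:
+      - echo "step 3"
+    when:
+      steps:
+        - name: Step2
+          on:
+            - success
+  Step5:
+    image: alpine
+    commands:
+      - echo "step 5"
+    when:
+      steps:
+        - name: Step3
+          on:
+            - success
+        - name: Step4
+          on:
+            - success
+{% endraw %}
+{% endhighlight %}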
+ +You can achieve parallelism in your Codefresh pipelines by using the following: + +- Simple parallel jobs ([inserting parallel steps into a sequential pipeline]({{site.baseurl}}/docs/pipelines/advanced-workflows/#inserting-parallel-steps-in-a-sequential-pipeline)) +- [Full parallel mode]({{site.baseurl}}/docs/pipelines/advanced-workflows/#parallel-pipeline-mode) +- Fan-out/fan-in parallel pipelines, as described in this article + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) + +## Example project + +You can find the example Spring boot application on [GitHub](https://github.com/codefresh-contrib/fan-out-fan-in-sample-app.git){:target="\_blank"}. It is a simple Hello World application with several different types of tests we will use to run using Codefresh's parallel mode. + +## Create the pipeline + +Our pipeline will have five stages: setup, start, web-tests, smoke, and end: + +{% include image.html +lightbox="true" +file="/images/examples/unit-tests/fan-in-fan-out-pipeline.png" +url="/images/examples/unit-tests/fan-in-fan-out-pipeline.png" +alt="fan-in-fan-out UI pipeline view" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor in the Codefresh UI. It will automatically clone the project for you. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +mode: parallel +stages: +- setup +- start +- web-tests +- smoke +- end +steps: + Clone: + title: Cloning main repository... + stage: setup + type: git-clone + arguments: + repo: codefresh-contrib/fan-out-fan-in-sample-app + git: github + revision: master + Build_image: + title: Building Docker Image... + type: build + stage: setup + working_directory: ${{Clone}} + arguments: + image_name: spring-backend + tag: latest + dockerfile: Dockerfile + when: + steps: + - name: Clone + on: + - success + Step1: + title: Running unit tests... + stage: start + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="unit" test + when: + steps: + - name: Build_image + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step2: + title: Running web mock test... + stage: web-tests + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="web-mock" test + when: + steps: + - name: Step1 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step3: + title: Running smoke test... + stage: smoke + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="smoke" test + when: + steps: + - name: Step2 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step4: + title: Running web layer tests... + stage: web-tests + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="web-layer" test + when: + steps: + - name: Step1 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step5: + title: Running integration tests... 
+ stage: end + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="integration" test + when: + steps: + - name: Step3 + on: + - success + - name: Step4 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +>Note the special use of `mode: parallel` declared at the root of our yaml. This syntax makes the pipeline use the full parallel mode. +The order of your build steps doesn't matter in this case, each step is executed according to its [condition]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/). + +- Step1 (unit tests) fans out to Step2 and Step4 (web tests), which run in parallel +- Step3 (smoke tests) does not execute until Step2 is completed +- Step3 and Step4 fan in to the final step, Step5 (integration tests) + +This pipeline consists of the following: + +1. Clones the main repository through a [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Builds the cloned source code into a Docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +3. Runs [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that: + - Run unit tests according to their respective @Tags + - Use the image built in the second step as a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Parallel pipeline mode]({{site.baseurl}}/docs/pipelines/advanced-workflows/#parallel-pipeline-mode) + diff --git a/_docs/example-catalog/ci-examples/general.md b/_docs/example-catalog/ci-examples/general.md new file mode 100644 index 000000000..3cec98bc8 --- /dev/null +++ b/_docs/example-catalog/ci-examples/general.md @@ -0,0 +1,16 @@ +--- +title: "General" +description: "" +group: example-catalog +redirect_from: + - /docs/learn-by-example/general/ +toc: true +--- +This section contains Codefresh examples based on other technologies. 
+{% comment %} +links not available in base documentation +- [How to trigger the another pipeline using cf-cli](doc:how-to-trigger-another-pipeline-using-cf-cli) +- [How to run composition using cf-cli](doc:how-to-run-composition-using-cf-cli-1) +- [How to spin up image using cf-cli](doc:how-to-spin-up-image-using-cf-cli) +{% endcomment %} +- [Selenium test]({{site.baseurl}}/docs/learn-by-example/general/selenium-test/) diff --git a/_docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process.md b/_docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process.md new file mode 100644 index 000000000..b071c29b0 --- /dev/null +++ b/_docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process.md @@ -0,0 +1,77 @@ +--- +title: "Use Git Hash in CI" +description: "Get short SHA ID and use it in a CI Process" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/how-to-guides/ + - /docs/how-get-first-8-digits-of-sha/ +toc: true +old_url: /docs/how-get-first-8-digits-of-sha +--- + +## Get the short SHA ID +Add the following variable to your script: + +{% highlight text %} +{% raw %} +${{CF_SHORT_REVISION}} +{% endraw %} +{% endhighlight %} + + +## Use the SHA ID in a tag + + +{% highlight text %} +{% raw %} +tag: ${{CF_SHORT_REVISION}} +{% endraw %} +{% endhighlight %} + + +## YAML example + +{% highlight yaml %} +{% raw %} +step-name: + type: build + description: Free text description + working-directory: ${{clone-step-name}} + dockerfile: path/to/Dockerfile + image-name: owner/new-image-name + tag: ${{CF_SHORT_REVISION}} + build-arguments: + - key=value + fail-fast: false +{% endraw %} +{% endhighlight %} + +## Result in [hub.docker](https://hub.docker.com){:target="_blank"} + +{% include image.html +lightbox="true" +file="/images/examples/git/sha-id-docker-hub.png" +url="/images/examples/git/sha-id-docker-hub.png" +alt="SHA ID in Docker Hub" +caption="SHA ID in Docker Hub" +max-width="60%" +%} + +## Result in Codefresh + +{% include image.html +lightbox="true" +file="/images/examples/git/sha-id-codefresh.png" +url="/images/examples/git/sha-id-codefresh.png" +caption="SHA ID in Codefresh" +alt="SHA ID in Codefresh" +max-width="60%" +%} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/git-checkout-custom.md b/_docs/example-catalog/ci-examples/git-checkout-custom.md new file mode 100644 index 000000000..9a17e018f --- /dev/null +++ b/_docs/example-catalog/ci-examples/git-checkout-custom.md @@ -0,0 +1,106 @@ +--- +title: "Using custom Git commands" +description: "Manually clone Git repositories" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/git-clone-private-repository-using-freestyle-step/ + - /docs/example-catalog/ci-examples/git-clone-private-repository-using-freestyle-step/ +toc: true +--- + +>Manually running Git commands is an advanced technique. For most use cases you should use the [native Git checkout]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) offered by Codefresh. + +For complex cloning, you can still use custom clone commands in a freestyle step. 
In this case,
+you lose the native Codefresh integration, such as Git authentication and automatic workdir setup. Use custom clone commands only as a last resort.
+
+
+## Cloning with the Git executable
+
+It is very easy to run custom Git commands in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). Pass any parameters to the `git clone` command as you would pass them on your local workstation.
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  myCustomClone:
+    title: Performing shallow clone
+    image: alpine/git:latest
+    commands:
+      - rm -rf ruby-on-rails-sample-app
+      - git clone --depth 1 https://github.com/codefresh-contrib/ruby-on-rails-sample-app.git
+  PrintFileList:
+    title: 'Listing files'
+    image: alpine:latest
+    working_directory: './ruby-on-rails-sample-app'
+    commands:
+      - 'ls -l'
+{% endraw %}
+{% endhighlight %}
+
+Notice the `rm` command before the clone step. This makes sure that every time the pipeline runs, the `git clone` step starts in an empty directory. Otherwise the `git clone` command will fail (Git refuses to clone into an existing directory).
+
+You can enter your own Git username/password or [reuse the credentials]({{site.baseurl}}/docs/pipelines/steps/git-clone/#reuse-a-git-token-from-codefresh-integrations) from the Codefresh integration.
+
+## Manually running Git commands
+
+Once you understand that you can manually run Git commands in Codefresh pipelines, it is easy to see that any Git workflow is possible.
+Here is an example where an application is packaged in a Docker container, after merging `master` into a specific branch.
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  myCustomClone:
+    title: Cloning and merging branches
+    image: alpine/git:latest
+    commands:
+      - rm -rf example_nodejs_postgres
+      - git clone https://github.com/kostis-codefresh/example_nodejs_postgres
+      - cd example_nodejs_postgres
+      - git checkout experiment1
+      - git merge master
+      - git status
+  myDockerImage:
+    title: 'Building Docker Image'
+    type: build
+    dockerfile: Dockerfile
+    working_directory: './example_nodejs_postgres'
+    image_name: my-app-image
+    tag: from-master-branch
+{% endraw %}
+{% endhighlight %}
+
+If there are any errors with the merge, the pipeline fails, since Codefresh automatically stops any pipeline whose step reports an error.
+
+## Other forms of cloning
+
+There is nothing special about running Git in a freestyle step. In fact, you can check out code with any other command that you would run locally in your terminal.
+
+Here is an example with Golang.
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  myCustomClone:
+    title: Download example
+    image: golang:1.11-alpine
+    commands:
+      - apk add --no-cache git
+      - go get github.com/golang/example/hello
+{% endraw %}
+{% endhighlight %}
+
+If you run this pipeline you will see Git used as part of the `go get` mechanism.
+
+More examples, such as using SSH keys and working with Git submodules, can be found in the [clone step documentation]({{site.baseurl}}/docs/pipelines/steps/git-clone/).
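+
+As one more illustration, here is a sketch of manually cloning a private repository over HTTPS with a personal access token. The repository name is a placeholder and `MY_GIT_TOKEN` is an assumed pipeline variable (defined, for example, in the pipeline settings or in shared configuration); it is not something Codefresh injects automatically:
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  cloneWithToken:
+    title: Cloning private repository
+    image: alpine/git:latest
+    commands:
+      # start from an empty folder, then authenticate with the token as the password
+      - rm -rf my-private-app
+      - git clone https://my-username:${{MY_GIT_TOKEN}}@github.com/my-username/my-private-app.git
+{% endraw %}
+{% endhighlight %}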
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Native Git checkout]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) +[Native Git integration]({{site.baseurl}}/docs/integrations/git-providers/) +[Freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) +[Git Clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) + diff --git a/_docs/example-catalog/ci-examples/git-checkout.md b/_docs/example-catalog/ci-examples/git-checkout.md new file mode 100644 index 000000000..81cc9b236 --- /dev/null +++ b/_docs/example-catalog/ci-examples/git-checkout.md @@ -0,0 +1,203 @@ +--- +title: "Check out Git repositories" +description: "Use the Codefresh native GIT integration" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh has native support for Git repositories and Git triggers. First you need to set up a [Git integration]({{site.baseurl}}/docs/integrations/git-providers/) (your administrator might also have done this for you already). + +{% include image.html +lightbox="true" +file="/images/integrations/git/git-integrations.png" +url="/images/integrations/git/git-integrations.png" +alt="GIT integrations" +caption="GIT integrations" +max-width="70%" +%} + +You can add a new integration for any cloud provider or even [on-premises]({{site.baseurl}}/docs/reference/behind-the-firewall/) ones. By default you will also have a provider set up if you used one for Codefresh signup (GitHub, GitLab or Bitbucket). + +For each Git Integration, make sure that you note down its name, as you will use in your pipeline inside a [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step. + + +## Cloning a specific repository + +The simplest way to clone using your git provider is by specifying the exact repository details. +Here is a pipeline that clones a git repository and creates a Docker image from a Dockerfile: + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: kostis-codefresh/example_nodejs_postgres + revision: master + git: github-1 + myDockerImage: + title: 'Building My Docker Image' + type: build + dockerfile: Dockerfile + image_name: my-app-image + tag: from-master-branch +{% endraw %} +{% endhighlight %} + +This syntax is very simple to use, but it has the disadvantage that ties your pipeline to a specific repository. This makes +the pipeline impossible to re-use among different micro-services (that are built in a similar manner). + +## Cloning the triggered repository (recommended) + +The proper way to use git-clone steps is to make them trigger specific. Instead of hard-coding the git repository that is checked-out, it is best to checkout the same one that [triggered the pipeline]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/). This is what you want in most scenarios anyway. + +This can be achieved by using Codefresh [variables]({{site.baseurl}}/docs/pipelines/variables/) to refer to the trigger. +Here is the same pipeline as before, written in a generic way: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' 
+ type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: github-1 + myDockerImage: + title: 'Building My Docker Image' + type: build + dockerfile: Dockerfile + image_name: my-app-image + tag: ${{CF_BRANCH_TAG_NORMALIZED}} +{% endraw %} +{% endhighlight %} + +The big advantage of this pipeline is that it can be reused for *ALL* your projects that follow the same pattern of having a Dockerfile in the root of the git repository. + +{% include image.html +lightbox="true" +file="/images/examples/checkout/add-new-microservice.png" +url="/images/examples/checkout/add-new-microservice.png" +alt="Reusing a pipeline between microservices" +caption="Reusing a pipeline between microservices" +max-width="50%" +%} + +Thus you can have a single pipeline and when you want to enable it for a new micro-service you can simply add a new [git trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) for it. + +You still run the pipeline manually if you wish. In this case you will be asked which trigger you want to "simulate" so that the variable pipelines are correctly replaced by Codefresh. + +{% include image.html +lightbox="true" +file="/images/examples/checkout/simulate-trigger.png" +url="/images/examples/checkout/simulate-trigger.png" +alt="Simulating a GIT trigger" +caption="Simulating a GIT trigger" +max-width="50%" +%} + +This is the recommended way of creating re-usable pipelines in Codefresh. + +## Cloning a repository with Codefresh Runner +If you have the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) installed, you need to use +the fully qualified path of the Git repository: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: https://github-internal.example.com/my-username/my-app + revision: '${{CF_REVISION}}' + git: my-internal-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +More details can be found in the [private Git instructions page]({{site.baseurl}}/docs/reference/behind-the-firewall/#checking-out-code-from-a-private-git-repository). + + +## Working inside the cloned directory + +Normally each [pipeline step]({{site.baseurl}}/docs/pipelines/steps/) in Codefresh can be named as you want. Specifically, for the Git-clone step however the name `main_clone` is special. + +If you name your clone step as `main_clone`, Codefresh automatically changes the working directory for all the next (non Git-clone) pipeline steps, to be the same as the project that was just checked out. This only applies to [built-in]({{site.baseurl}}/docs/pipelines/steps/#built-in-steps) Codefresh steps and not [custom plugins]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin). + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/checkout.png" +url="/images/pipeline/introduction/checkout.png" +alt="Checkout structure" +caption="Checkout structure" +max-width="50%" +%} + +This is probably what you want anyway, so make sure that you name your Git-clone steps as `main_clone`. If you use any other name, then the working folder will be the parent of the checked-out project which is the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) at `/codefresh/volume`. 
+ +If you have more then one clone step in a pipeline, it is recommended to define the working directory explicitly (see next example), instead +of depending on the `main_clone` naming convention, which is best used in pipelines with a single clone step. + +## Cloning multiple repositories + +You can use as many clone steps as you want and at any position in the pipeline. + +Here is an example where two repositories are checked out and two docker images are then built. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + checkoutApp1: + title: 'Cloning first repository...' + type: git-clone + repo: kostis-codefresh/example_nodejs_postgres + revision: experiment1 + git: github + myFirstDockerImage: + title: 'Building First Docker Image' + type: build + dockerfile: Dockerfile + image_name: my-nodejs-image + tag: from-develop-branch + working_directory: './example_nodejs_postgres' + checkoutApp2: + title: 'Cloning second repository...' + type: git-clone + repo: kostis-codefresh/trivial-go-web + revision: master + git: github + mySecondDockerImage: + title: 'Building Second Docker Image' + type: build + dockerfile: Dockerfile + working_directory: './trivial-go-web' + image_name: my-app-image + tag: from-master-branch +{% endraw %} +{% endhighlight %} + +Notice that in this case the git-clone steps are **not** named `main_clone` and therefore we specify exactly what is the working directory for each one. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Git integrations]({{site.baseurl}}/docs/integrations/git-providers/) +[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[Clone step in pipelines]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Custom git commands]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout-custom/) diff --git a/_docs/example-catalog/ci-examples/gitops-secrets.md b/_docs/example-catalog/ci-examples/gitops-secrets.md new file mode 100644 index 000000000..1db214dcc --- /dev/null +++ b/_docs/example-catalog/ci-examples/gitops-secrets.md @@ -0,0 +1,229 @@ +--- +title: "Secrets with GitOps" +description: "Store secrets in Git with Bitnami sealed secrets" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) +- A Kubernetes cluster +- The [Codefresh GitOps agent]({{site.baseurl}}/docs/integrations/argocd/) installed on the cluster + +## Using the Bitnami Sealed secrets controller + +If you follow [GitOps](https://codefresh.io/gitops/){:target="\_blank"}, then you should already know that everything should be placed under source control, and Git is to be used as the single source of truth. + +This presents a challenge with secrets that are needed by the application, as they must never be stored in Git in clear text under any circumstance. + +To solve this issue, we can use the [Bitnami Sealed secrets controller](https://github.com/bitnami-labs/sealed-secrets){:target="\_blank"}. This is a Kubernetes controller that can be used to encrypt/decrypt your application secrets in a secure way. + +The order of events is the following: + +1. You install the Bitnami Sealed secrets controller in the cluster. It generates a public and private key. The private key stays in the cluster and is never revealed. +1. 
You take a raw secret and use the `kubeseal` utility to encrypt it. Encryption happens with the public key of the cluster, which you can give to anybody.
+1. The encrypted secrets are stored in Git. They are safe to commit, and nobody can decrypt them without direct access to the cluster.
+1. At runtime you deploy the sealed secrets like any other Kubernetes manifests. The controller converts them to [plain Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/){:target="\_blank"} on the fly, using the private key of the cluster.
+1. Your application reads the secrets like any other Kubernetes secret. Your application doesn't need to know anything about the sealed secrets controller or how the encryption/decryption works.
+
+
+To use the controller, first install it in your cluster:
+
+```
+helm repo add sealed-secrets https://bitnami-labs.github.io/sealed-secrets
+helm repo update
+helm install sealed-secrets-controller sealed-secrets/sealed-secrets
+```
+
+By default, the controller is installed in the `kube-system` namespace. The namespace
+and release names are important: if you change the defaults, you need to set them up
+with `kubeseal` as well when you work with secrets.
+
+Download the `kubeseal` CLI:
+```
+wget https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.16.0/kubeseal-linux-amd64 -O kubeseal
+sudo install -m 755 kubeseal /usr/local/bin/kubeseal
+```
+
+## Example application
+
+You can find the example project at [https://github.com/codefresh-contrib/gitops-secrets-sample-app](https://github.com/codefresh-contrib/gitops-secrets-sample-app){:target="\_blank"}.
+
+It is a web application that prints out several secrets, which are [read from the filesystem](https://github.com/codefresh-contrib/gitops-secrets-sample-app/blob/main/settings.ini){:target="\_blank"}:
+
+`settings.ini`
+```ini
+[security]
+# Path to key pair
+private_key = /secrets/sign/key.private
+public_key= /secrets/sign/key.pub
+
+[paypal]
+paypal_url = https://development.paypal.example.com
+paypal_cert=/secrets/ssl/paypal.crt
+
+[mysql]
+db_con= /secrets/mysql/connection
+db_user = /secrets/mysql/username
+db_password = /secrets/mysql/password
+```
+
+The application itself knows nothing about Kubernetes secrets, mounted volumes or any other cluster resource.
It only reads its own filesystem at `/secrets` + +This folder is populated inside the pod with [secret mounting](https://github.com/codefresh-contrib/gitops-secrets-sample-app/blob/main/manifests/deployment.yml){:target="\_blank"}: + +```yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gitops-secrets-deploy +spec: + replicas: 1 + selector: + matchLabels: + app: gitops-secrets-app + template: + metadata: + labels: + app: gitops-secrets-app + spec: + containers: + - name: gitops-secrets-app + image: docker.io/kostiscodefresh/gitops-secrets-sample-app:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + volumeMounts: + - name: mysql + mountPath: "/secrets/mysql" + readOnly: true + - name: paypal + mountPath: "/secrets/ssl" + readOnly: true + - name: sign-keys + mountPath: "/secrets/sign/" + readOnly: true + livenessProbe: + httpGet: + path: /health + port: 8080 + readinessProbe: + httpGet: + path: /health + port: 8080 + volumes: + - name: mysql + secret: + secretName: mysql-credentials + - name: paypal + secret: + secretName: paypal-cert + - name: sign-keys + projected: + sources: + - secret: + name: key-private + - secret: + name: key-public + +``` + +This way there is a clear separation of concerns. + + + +You can find the secrets themselves at [https://github.com/codefresh-contrib/gitops-secrets-sample-app/tree/main/never-commit-to-git/unsealed_secrets](https://github.com/codefresh-contrib/gitops-secrets-sample-app/tree/main/never-commit-to-git/unsealed_secrets){:target="\_blank"}. There are encoded with base64 so they are **NOT** safe to commit in Git. + +>Note that for demonstration purposes, the Git repository contains raw secrets so that you can encrypt them yourself. In a production application, the Git repository must only contain sealed/encrypted secrets. + +## Preparing the secrets + +The critical point of this application is to encrypt all the secrets and place them in Git. +By default, the sealed secrets controller encrypts a secret according to a specific namespace (this behavior is configurable), so you need to decide in advance which namespace wil host the application. + +Then encrypt all secrets as below: + +``` +kubectl create ns git-secrets +cd safe-to-commit/sealed_secrets +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/db-creds.yml > db-creds.json +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/key-private.yml > key-private.json +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/key-public.yml > key-public.json +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/paypal-cert.yml > paypal-cert.json +kubectl apply -f . -n git-secrets + +``` + +You now have encrypted your plain secrets. These files are safe to commit to Git. +You can see that they have been converted automatically to plain secrets with the command: + +``` +kubectl get secrets -n git-secrets +``` + +## Manually deploying the application + +Note that the application requires all secrets to be present: + +``` +cd safe-to-commit/manifests +kubectl apply -f . -n git-secrets +``` + +You can now visit the application url to see how it has access to all the secrets. + + +## Deploying the application with Codefresh GitOps + +Of course the big advantage of having everything committed into Git, is the ability to adopt GitOps +for the whole application (including secrets). 
+ +This means that you can simply [point Codefresh GitOps to your repository]({{site.baseurl}}/docs/integrations/argocd/#creating-argocd-applications) and have the application +automatically deploy in the cluster. + +{% include image.html +lightbox="true" +file="/images/examples/sealed-secrets/add-app.png" +url="/images/examples/sealed-secrets/add-app.png" +alt="Creating a GitOps application" +caption="Creating a GitOps application" +max-width="50%" +%} + +You can then see the application in the GitOps dashboard: + +{% include image.html +lightbox="true" +file="/images/examples/sealed-secrets/current-state.png" +url="/images/examples/sealed-secrets/current-state.png" +alt="GitOps dashboard" +caption="GitOps dashboard" +max-width="90%" +%} + +If you visit its URL you will see the secrets being loaded: + +{% include image.html +lightbox="true" +file="/images/examples/sealed-secrets/app-secrets.png" +url="/images/examples/sealed-secrets/app-secrets.png" +alt="Application using secrets" +caption="Application using secrets" +max-width="90%" +%} + + +>Note that for simplicity reasons the same Git repository holds both the application source code and its +manifests. In an actual application, you should have two Git repositories (one of the source code only and one of the manifests). + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh GitOps]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/) +[Using secrets]({{site.baseurl}}/docs/pipelines/secrets-store/) +[Secrets with Mozilla Sops]({{site.baseurl}}/docs/example-catalog/ci-examples/decryption-with-mozilla-sops/) +[Vault Secrets in the Pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline/) + diff --git a/_docs/example-catalog/ci-examples/golang-hello-world.md b/_docs/example-catalog/ci-examples/golang-hello-world.md new file mode 100644 index 000000000..8c3e0c3f3 --- /dev/null +++ b/_docs/example-catalog/ci-examples/golang-hello-world.md @@ -0,0 +1,269 @@ +--- +title: "Create a Docker image for GO" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/go/cf-example-golang-hello-world/ +toc: true +--- + +Codefresh can work with Go projects of any version using built-in modules or any other dependency mechanism. + +## The example golang project + +You can see the example project at [https://github.com/codefresh-contrib/golang-sample-app](https://github.com/codefresh-contrib/golang-sample-app){:target="\_blank"}. The repository contains a simple Golang web application including unit tests. There are 3 Dockerfiles available: + +* [Simple Dockerfile](https://github.com/codefresh-contrib/golang-sample-app/blob/master/Dockerfile){:target="\_blank"} (with old Go version that requires `GOPATH` building) +* [Dockerfile with Go modules](https://github.com/codefresh-contrib/golang-sample-app/blob/master/Dockerfile.mod){:target="\_blank"} (optimized for Docker caching) +* [Multi-stage Dockerfile](https://github.com/codefresh-contrib/golang-sample-app/blob/master/Dockerfile.multistage){:target="\_blank"} (with Go modules and unit tests) + +Let's see these workflows in order. 
+ +## Simple Docker image pipeline + +The most [simple pipeline](https://github.com/codefresh-contrib/golang-sample-app/blob/master/codefresh.yml){:target="\_blank"} that you can create is just two steps: +* A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to fetch the code +* A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create a Docker image + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: full + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Once you run this pipeline Codefresh will create a Docker image for the Golang application: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/golang-simple-pipeline.png" +url="/images/learn-by-example/golang/golang-simple-pipeline.png" +alt="Simple pipeline for Golang" +caption="Simple pipeline for Golang" +max-width="80%" +%} + +The big advantage of this workflow is that the Dockerfile you use can define any Go version and dependency tool. As long as the Dockerfile is self-contained (i.e. it compiles GO on its own), the pipeline will work as expected. + +In the example application, the simple (unoptimized) Dockerfile has an old Go version that still requires `GOPATH` folders. + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.10 + +# Set the Current Working Directory inside the container +WORKDIR $GOPATH/src/github.com/codefresh-contrib/go-sample-app + +# Copy everything from the current directory to the PWD (Present Working Directory) inside the container +COPY . . + +# Download all the dependencies +RUN go get -d -v ./... + +# Install the package +RUN go install -v ./... + +# This container exposes port 8080 to the outside world +EXPOSE 8080 + +# Run the executable +CMD ["go-sample-app"] +{% endraw %} +{% endhighlight %} + + +## Run unit tests as part of the pipeline + +If you want to run Go specific steps in your pipeline, you can use [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) steps with any GO image that you want. If your GO application is using GO modules, this is even easier as you don't need to place the application into a specific GOPATH compliant directory first. + +This [pipeline](https://github.com/codefresh-contrib/golang-sample-app/blob/master/codefresh-gomod.yml){:target="\_blank"} is running unit tests as a separate step and then builds the docker image. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - test + - build +steps: + main_clone: + title: Cloning main repository... + type: git-clone + stage: checkout + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyUnitTests: + title: Unit test + stage: test + image: 'golang:1.12' + commands: + - go test -v + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-golang-image + working_directory: ./ + tag: modules + dockerfile: Dockerfile.mod +{% endraw %} +{% endhighlight %} + +If the unit tests fail, then the docker image will never be created (Codefresh automatically stops a pipeline when there is an error). 
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/golang-ci-pipeline.png" +url="/images/learn-by-example/golang/golang-ci-pipeline.png" +alt="Golang pipeline with unit tests" +caption="Golang pipeline with unit tests" +max-width="80%" +%} + +Notice that in this case we have added module support in the Go application. The new Dockerfile is the following: + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.12-alpine + +RUN apk add --no-cache git + +# Set the Current Working Directory inside the container +WORKDIR /app/go-sample-app + +# We want to populate the module cache based on the go.{mod,sum} files. +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . + +# Build the Go app +RUN go build -o ./out/go-sample-app . + + +# This container exposes port 8080 to the outside world +EXPOSE 8080 + +# Run the binary program produced by `go install` +CMD ["./out/go-sample-app"] +{% endraw %} +{% endhighlight %} + +The Dockerfile will also automatically take advantage of the Codefresh distributed docker cache. + + + +## Create a multi-stage Docker image for GO + +Especially with Go applications, the recommended way to create Docker images is with [multi-stage builds](https://docs.docker.com/develop/develop-images/multistage-build/){:target="\_blank"}. This makes the resulting Docker image as compact as possible. + +You can also embed unit tests in the Docker creation process, which guarantee the correctness of image (integration tests are best kept in the pipeline). + +Here is the new Dockerfile: + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.12-alpine AS build_base + +RUN apk add --no-cache git + +# Set the Current Working Directory inside the container +WORKDIR /tmp/go-sample-app + +# We want to populate the module cache based on the go.{mod,sum} files. +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . + +# Unit tests +RUN CGO_ENABLED=0 go test -v + +# Build the Go app +RUN go build -o ./out/go-sample-app . + +# Start fresh from a smaller image +FROM alpine:3.9 +RUN apk add ca-certificates + +COPY --from=build_base /tmp/go-sample-app/out/go-sample-app /app/go-sample-app + +# This container exposes port 8080 to the outside world +EXPOSE 8080 + +# Run the binary program produced by `go install` +CMD ["/app/go-sample-app"] +{% endraw %} +{% endhighlight %} + +Codefresh has native support for multi-stage builds. The [pipeline](https://github.com/codefresh-contrib/golang-sample-app/blob/master/codefresh-multi-stage.yml){:target="\_blank"} is the same as the first one with just two steps. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Multi-stage Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: multi-stage + dockerfile: Dockerfile.multistage +{% endraw %} +{% endhighlight %} + +You should see a much smaller Docker image at the end. 
+ + +## Viewing Docker images + +If you look at your [Docker registry dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images) created the advantages of the multi-stage build are very clear: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/golang-image-size.png" +url="/images/learn-by-example/golang/golang-image-size.png" +alt="Creating different Docker images" +caption="Creating different Docker images" +max-width="80%" +%} + +We recommend using Go modules and multi-stage builds in your Go projects. + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + diff --git a/_docs/example-catalog/ci-examples/golang.md b/_docs/example-catalog/ci-examples/golang.md new file mode 100644 index 000000000..468e4eb87 --- /dev/null +++ b/_docs/example-catalog/ci-examples/golang.md @@ -0,0 +1,14 @@ +--- +title: "Go" +description: "How to build Golang applications with Codefresh CI/CD pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/go/ + - /docs/golang/ +toc: true +--- +This section contains Codefresh examples based on Go. + +- [Golang Docker Example]({{site.baseurl}}/docs/learn-by-example/golang/golang-hello-world/) +- [Golang with goreleaser]({{site.baseurl}}/docs/learn-by-example/golang/goreleaser/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/goreleaser.md b/_docs/example-catalog/ci-examples/goreleaser.md new file mode 100644 index 000000000..23cf3611a --- /dev/null +++ b/_docs/example-catalog/ci-examples/goreleaser.md @@ -0,0 +1,118 @@ +--- +title: "Compile and release a Go application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Goreleaser](https://github.com/goreleaser/goreleaser){:target="\_blank"} is a helper utility that allows you to easily create the following for Go applications: + +* Binary packages for each OS/arch +* Archives +* GitHub releases +* Docker images +* Snap/RPM/deb/Homebrew + + +Codefresh can also create Docker images on its own, but Goreleaser is still useful for the binary artifact creation capability. + + +## Run Goreleaser with docker + +You can see the example project at [https://github.com/codefresh-contrib/goreleaser-sample-app](https://github.com/codefresh-contrib/goreleaser-sample-app){:target="\_blank"}. The repository contains a simple Golang web application with a [goreleaser configuration](https://github.com/codefresh-contrib/goreleaser-sample-app/blob/master/.goreleaser.yml){:target="\_blank"}. + + +There is already a [Docker image for Goreleaser](https://hub.docker.com/r/goreleaser/goreleaser/){:target="\_blank"} so it is very easy to use it in Codefresh pipeline. +In the most simple case you case run goreleaser in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + + `YAML` +{% highlight yaml %} +{% raw %} + ReleaseMyApp: + title: Creating packages + stage: release + image: 'goreleaser/goreleaser' + commands: + - goreleaser --snapshot --skip-publish --rm-dist +{% endraw %} +{% endhighlight %} + +More typically however you also need to provide a GitHub token so that GitHub releases are also available. There are two ways to do that. 
+
+
+## Create a CI pipeline that compiles/releases Go
+
+In most cases you want to just reuse the Git integration already defined in Codefresh.
+This [pipeline](https://github.com/codefresh-contrib/goreleaser-sample-app/blob/master/codefresh.yml){:target="\_blank"} uses the GitHub token from the [Git integration]({{site.baseurl}}/docs/integrations/git-providers/) to allow GitHub access.
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+stages:
+  - prepare
+  - build
+  - release
+steps:
+  main_clone:
+    title: 'Cloning main repository...'
+    type: git-clone
+    repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
+    revision: '${{CF_REVISION}}'
+    stage: prepare
+  BuildMyApp:
+    title: Compiling go code
+    stage: build
+    image: 'golang:1.12'
+    commands:
+      - go build
+  GetGitToken:
+    title: Reading GitHub token
+    stage: release
+    image: codefresh/cli
+    commands:
+      - cf_export GITHUB_TOKEN=$(codefresh get context github-1 --decrypt -o yaml | yq -y .spec.data.auth.password)
+  ReleaseMyApp:
+    title: Creating packages
+    stage: release
+    image: 'goreleaser/goreleaser'
+    commands:
+      - goreleaser --rm-dist
+{% endraw %}
+{% endhighlight %}
+
+Note that GoReleaser [requires a GitHub API token](https://goreleaser.com/environment/){:target="\_blank"} (`GITHUB_TOKEN`) with the `repo` scope to deploy artifacts to GitHub.
+Here we use [cf_export]({{site.baseurl}}/docs/pipelines/variables/#exporting-environment-variables-from-a-freestyle-step) and the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} to retrieve the existing token (the one used in Git integrations). In your case, replace `github-1` with the name of your [GitHub integration]({{site.baseurl}}/docs/integrations/git-providers/).
+
+It is also possible to pass a GITHUB_TOKEN directly in the pipeline, if you don't want to reuse the existing one. This is an alternative way of allowing Goreleaser to create GitHub releases.
+
+{% include image.html
+lightbox="true"
+file="/images/learn-by-example/golang/github-token.png"
+url="/images/learn-by-example/golang/github-token.png"
+alt="Passing a specific github token in the pipeline"
+caption="Passing a specific github token in the pipeline"
+max-width="70%"
+%}
+
+You could also store the token in [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/).
+Regardless of the way you choose to pass the GitHub token, the final step is to make sure that your pipeline is only executed for tag events.
+
+
+{% include image.html
+lightbox="true"
+file="/images/learn-by-example/golang/tags-only-trigger.png"
+url="/images/learn-by-example/golang/tags-only-trigger.png"
+alt="Run pipeline only on tag creation"
+caption="Run pipeline only on tag creation"
+max-width="80%"
+%}
+
+This means that this pipeline will not run on normal commits. It is also possible to use [step conditionals]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) for more complex cases.
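+
+As a rough sketch of such a conditional, the release step could be limited to version tags. This is only an illustration and not part of the example repository: it assumes that your tags are prefixed with `v` and that `CF_BRANCH` carries the tag name for tag-push triggers, which depends on how your trigger is set up.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  ReleaseMyApp:
+    title: Creating packages
+    stage: release
+    image: 'goreleaser/goreleaser'
+    commands:
+      - goreleaser --rm-dist
+    when:
+      condition:
+        all:
+          # Only run when the revision looks like a version tag (assumption: tags start with v<digit>)
+          isVersionTag: 'match("${{CF_BRANCH}}", "^v[0-9]", true) == true'
+{% endraw %}
+{% endhighlight %}
+
+Treat this only as a starting point; the trigger-based approach shown above is usually simpler.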
+
+## Related articles
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/)
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/)
\ No newline at end of file
diff --git a/_docs/example-catalog/ci-examples/gradle.md b/_docs/example-catalog/ci-examples/gradle.md
new file mode 100644
index 000000000..73bc26ee3
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/gradle.md
@@ -0,0 +1,207 @@
+---
+title: "Java Example with Gradle and Docker"
+description: "Create Docker images for Spring/Gradle"
+excerpt: ""
+group: example-catalog
+sub_group: ci-examples
+redirect_from:
+  - /docs/java/gradle/
+toc: true
+---
+
+Codefresh can work with Gradle builds in a similar manner as with [Maven builds]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/){:target="\_blank"}.
+
+## The example Gradle project
+
+You can see the example project at [https://github.com/codefresh-contrib/gradle-sample-app](https://github.com/codefresh-contrib/gradle-sample-app){:target="\_blank"}. The repository contains a Spring Boot 2 project built with Gradle, with the following tasks:
+
+* `gradle test` runs unit tests.
+* `gradle build` creates a self-contained jar file (using Spring Boot).
+
+Once launched, the application presents a simple message at localhost:8080 and also at the various `/actuator/health` endpoints.
+
+## Gradle and Docker (multi-stage builds)
+
+The easiest way to use Gradle is with [multi-stage builds](https://blog.docker.com/2017/07/multi-stage-builds/){:target="\_blank"}. With multi-stage builds a Docker build can use one base image for compilation/packaging/unit tests and a different one that will hold the runtime of the application. This makes the final image more secure and smaller in size (as it does not contain any development/debugging tools).
+
+In the case of Gradle, you can use a base image that has the full JDK and Gradle itself, while the final image has the JRE and nothing else.
+
+The example project uses multi-stage builds by default.
+
+Here is the multi-stage Dockerfile:
+
+ `Dockerfile`
+{% highlight docker %}
+{% raw %}
+FROM gradle:4.7.0-jdk8-alpine AS build
+COPY --chown=gradle:gradle . /home/gradle/src
+WORKDIR /home/gradle/src
+RUN gradle build --no-daemon
+
+FROM openjdk:8-jre-slim
+
+EXPOSE 8080
+
+RUN mkdir /app
+
+COPY --from=build /home/gradle/src/build/libs/*.jar /app/spring-boot-application.jar
+
+ENTRYPOINT ["java", "-XX:+UnlockExperimentalVMOptions", "-XX:+UseCGroupMemoryLimitForHeap", "-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"]
+{% endraw %}
+{% endhighlight %}
+
+This Docker build does the following:
+
+1. Starts from the Gradle image
+1. Copies the Java source code inside the container
+1. Compiles the code and runs unit tests (with `gradle build`)
+1. Discards the Gradle image with all the compiled classes/unit test results etc.
+1. Starts again from the JRE image and copies **only** the JAR file created before
+
+We start Gradle without the long-running daemon, as the daemon is best used during local development only and not in CI/CD pipelines.
+
+### Create a CI pipeline for Gradle (multi-stage Docker builds)
+
+Because in multi-stage builds Docker itself handles most of the build process, moving the project to Codefresh is straightforward.
We just need [a single step](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/gradle-sample-app' + revision: master + git: github + BuildingDockerImage: + title: Building Docker Image + stage: build + type: build + image_name: gradle-sample-app + working_directory: ./ + tag: 'multi-stage' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This will compile/test/package the Gradle application and create a Docker image. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/gradle-multistage.png" +url="/images/learn-by-example/java/gradle-multistage.png" +alt="Gradle Multi-stage Docker build" +caption="Gradle Multi-stage Docker build" +max-width="80%" +%} + +Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. + + +## Packaging an existing Jar in a Docker image + +It also possible to have a simpler Dockerfile that only packages the final jar which was already created in the CI/CD pipeline (i.e. outside of Docker). + +A [simpler Dockerfile](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/Dockerfile.only-package){:target="\_blank"} is also provided at the same repository. It uses the base JRE image and just copies the JAR file inside the container. + + `Dockerfile.only-package` +{% highlight docker %} +{% raw %} +FROM openjdk:8-jre-slim + +EXPOSE 8080 + +RUN mkdir /app + +COPY build/libs/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java", "-XX:+UnlockExperimentalVMOptions", "-XX:+UseCGroupMemoryLimitForHeap", "-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] +{% endraw %} +{% endhighlight %} + +This means that _before_ building the Docker image, the compilation step (`gradle build`) is expected to be finished already. Therefore, in the `codefresh.yml` file we need at least two steps. The first one should prepare the JAR file and the second +one should create the Docker image. + +### Create a CI pipeline for a Gradle JAR + +The repository also contains a premade [Codefresh YAML file](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh-package-only.yml){:target="\_blank"} that creates a JAR file first and then packages it in a Docker image. + +Here are the full contents of the file. + + `codefresh-package-only.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - package + - build +steps: + main_clone: + title: Cloning main repository... 
+    stage: prepare
+    type: git-clone
+    repo: 'codefresh-contrib/gradle-sample-app'
+    revision: master
+    git: github
+  MyUnitTests:
+    title: Compile/Unit test
+    stage: test
+    image: gradle:4.7.0-jdk8-alpine
+    commands:
+      - gradle test --no-daemon --build-cache --gradle-user-home=/codefresh/volume/.gradle -Dmaven.repo.local=/codefresh/volume/m2
+  BuildMyJar:
+    title: Packaging Jar file
+    stage: package
+    image: gradle:4.7.0-jdk8-alpine
+    commands:
+      - gradle build --no-daemon --build-cache --gradle-user-home=/codefresh/volume/.gradle -Dmaven.repo.local=/codefresh/volume/m2
+  MyAppDockerImage:
+    title: Building Docker Image
+    type: build
+    stage: build
+    image_name: gradle-sample-app
+    working_directory: ./
+    tag: 'non-multi-stage'
+    dockerfile: Dockerfile.only-package
+{% endraw %}
+{% endhighlight %}
+
+The pipeline starts by checking out the code using a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). The next two steps are [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/), while the last one is a [build step]({{site.baseurl}}/docs/pipelines/steps/build/).
+
+{% include image.html
+lightbox="true"
+file="/images/learn-by-example/java/gradle-ci-pipeline.png"
+url="/images/learn-by-example/java/gradle-ci-pipeline.png"
+alt="Gradle pipeline"
+caption="Gradle pipeline"
+max-width="80%"
+%}
+
+After checking out the code, we use the standard [Gradle Docker image](https://hub.docker.com/_/gradle/){:target="\_blank"} to run unit tests. We also pass parameters that disable the Gradle daemon, enable the build cache, and change the cache folder to reside in the Codefresh volume.
+
+### Using the Gradle cache in Codefresh
+
+Codefresh [automatically caches]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#how-caching-works-in-codefresh) the workspace of a build (`/codefresh/volume`) for us. This works great for build tools that keep their cache in the project folder, but not for Maven/Gradle, which keep their cache externally. By changing the location of the Gradle cache, we make sure that Codefresh automatically caches the Gradle libraries, resulting in much faster builds. We also place the local Maven repository in the shared volume, so that all jars created by Gradle (i.e. with an `install` task) are also available to the next pipeline stage.
+
+The next step is similar to the previous one, but this time we actually build the JAR file. We again define a custom cache folder, so when you run the build you will see that Gradle automatically picks up the cache from the previous step. All Codefresh steps in a pipeline [run on the same workspace]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps), so the build results from one step are visible to the next.
+
+The last step is a Docker build. We name our image **gradle-sample-app** and tag it with the string `non-multi-stage`, but of course you can use any other tag name that you wish.
+Once the pipeline is finished, you will see the Spring Boot 2 Docker image in your [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images).
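+
+If you prefer not to repeat the cache flags on every Gradle command, the same cache location can also be set once per step through Gradle's standard `GRADLE_USER_HOME` environment variable. This is only a sketch and not part of the example repository; if you also rely on the shared local Maven repository, keep the `-Dmaven.repo.local` flag as shown above.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  BuildMyJar:
+    title: Packaging Jar file
+    stage: package
+    image: gradle:4.7.0-jdk8-alpine
+    environment:
+      # Hypothetical alternative to the --gradle-user-home flag used in the pipeline above
+      - GRADLE_USER_HOME=/codefresh/volume/.gradle
+    commands:
+      - gradle build --no-daemon --build-cache
+{% endraw %}
+{% endhighlight %}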
+ +## Related articles +[Spring Maven example]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/ci-examples/import-data-to-mongodb.md b/_docs/example-catalog/ci-examples/import-data-to-mongodb.md new file mode 100644 index 000000000..223ecc6cd --- /dev/null +++ b/_docs/example-catalog/ci-examples/import-data-to-mongodb.md @@ -0,0 +1,60 @@ +--- + +title: "Import data to MongoDB" +description: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/import-data-to-mongodb-in-composition/ + - /docs/on-demand-test-environment/example-compositions/import-data-to-mongodb/ +toc: true +--- + +To import, restore, or for any operation before using MongoDB in your application, look at the following example. + +You just need to create Dockerfile for Mongo seed service and provide the command to prepare MongoDB. In this case, the command is `mongoimport`. + + `Dockerfile mongo_seed` +{% highlight docker %} +FROM mongo +COPY init.json /init.json +CMD mongoimport --host mongodb --db exampleDb --collection contacts --type json --file /init.json --jsonArray +{% endhighlight %} + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. +Let's quickly review the contents of this file: + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + mongodb: + image: mongo + command: mongod --smallfiles + ports: + - 27017 + + mongo_seed: + image: ${{mongo_seed}} + links: + - mongodb + + client: + image: ${{build_prj}} + links: + - mongodb + ports: + - 9000 + environment: + - MONGO_URI=mongodb:27017/exampleDb +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +You can add the following example to your GitHub or Bitbucket account, and build the [example](https://github.com/codefreshdemo/cf-example-manage-mongodb){:target="_blank"}. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-mongo.md b/_docs/example-catalog/ci-examples/integration-tests-with-mongo.md new file mode 100644 index 000000000..1947496a9 --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-mongo.md @@ -0,0 +1,101 @@ +--- +title: "Integration Tests with Mongo" +description: "Launching a MongoDB service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/nodejsmongo/ + - /docs/testing/unit-tests/unit-tests-with-mongo/ +toc: true +--- + +In this example, we will see a NodeJS project that uses MongoDB for data storage. For the integration test phase we will launch an instance of MongoDB in order to run a set of [Mocha tests](https://mochajs.org/){:target="\_blank"}. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/mongodb-integration-tests.png" +url="/images/examples/integration-tests/mongodb-integration-tests.png" +alt="MongoDB integration tests with Codefresh" +caption="MongoDB integration tests with Codefresh" +max-width="90%" +%} + +The Mocha tests are looking for a MongoDB connection at `mongo:27017`. 
+ +## The example NodeJS project + +You can see the example project at [https://github.com/codefreshdemo/example_nodejs_mongo](https://github.com/codefreshdemo/example_nodejs_mongo){:target="\_blank"}. The repository contains the NodeJS source code and the Mocha tests. + +You can play with it locally by using Docker compose to launch both the application and the MongoDB datastore. + +## Create a pipeline with MongoDB integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/example_nodejs_mongo" + revision: "master" + git: github + stage: prepare + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "node-mongo-app" + tag: "master" + dockerfile: "Dockerfile" + stage: build + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_app_image}}' + environment: + - MONGO_PORT=27017 + commands: + # MongoDB is certainly up at this point + - cd /src + - npm test + services: + composition: + mongo: + image: mongo:latest + ports: + - 27017 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: '${{build_app_image}}' + commands: + - "nslookup mongo" + - "nc -z mongo 27017" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with the application source code as well as the Mocha tests through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Runs Mocha tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active MongoDB instance + +Notice that we also use the `readiness` property in the testing phase so that we can verify MongoDB is ready and listening, before running the tests. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) + + + + diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-mysql.md b/_docs/example-catalog/ci-examples/integration-tests-with-mysql.md new file mode 100644 index 000000000..cccb9a43b --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-mysql.md @@ -0,0 +1,110 @@ +--- +title: "Integration Tests with MySQL" +description: "Launching a MySQL service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/nodejsmysql/ + - /docs/testing/unit-tests/unit-tests-with-mysql/ + - /docs/setup-unit-tests/ + - /docs/testing/unit-tests/unit-tests-with-composition/ + - /docs/run-unit-tests-with-composition/ + - /docs/unit-tests-with-database/ + - /docs/testing/unit-tests/unit-tests-with-database/ + - /docs/example-catalog/ci-examples/integration-tests-with-database/ +toc: true +--- + +In this example, we will see a NodeJS project that is using MySQL for data storage. 
For the integration test phase we will launch an instance of MySQL in order to run a simple integration test. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/mysql-integration-tests.png" +url="/images/examples/integration-tests/mysql-integration-tests.png" +alt="MySQL integration tests with Codefresh" +caption="MySQL integration tests with Codefresh" +max-width="90%" +%} + +The integration tests look for a MySQL connection at `test_mysql_db:3306`. + +## Example NodeJS project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-unit-tests-with-composition](https://github.com/codefreshdemo/cf-example-unit-tests-with-composition){:target=\_blank"}. The repository contains the NodeJS source code and the simple integration test. + +You can play with it locally by using Docker compose to launch both the application and the MySQL Database. + +## Create a pipeline with MySQL integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/cf-example-unit-tests-with-composition" + revision: "master" + git: github + stage: prepare + build_test_image: + title: "Building Test Docker Image" + type: "build" + image_name: "mysql-tests" + tag: "master" + dockerfile: "Dockerfile" + stage: build + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_test_image}}' + environment: &test_mysql_vars + - MYSQL_ROOT_PASSWORD=admin + - MYSQL_USER=my_user + - MYSQL_PASSWORD=admin + - MYSQL_DATABASE=nodejs + - MYSQL_HOST=test_mysql_db + commands: + # MySQL is certainly up at this point + - cd /usr/src/app + - npm test + services: + composition: + test_mysql_db: + image: mysql:5.7 + ports: + - 3306 + environment: *test_mysql_vars # Same MYSQL_HOST, MYSQL_USER etc. + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: '${{build_test_image}}' + commands: + - "nslookup test_mysql_db" + - "nc -z test_mysql_db 3306" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with the integration test through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Runs the tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active MySQL instance passing the required environment variables (that match what the test is expecting). + +Notice that both the DB as well as the tests share a set of variables (`MYSQL_PASSWORD`, `MYSQL_USER` etc.) and thus we use [YAML anchors]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#using-yaml-anchors-to-avoid-repetition) to avoid duplication. + +Notice that we also use the `readiness` property in the testing phase so that we can verify MySQL is ready and listening, before running the tests. 
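+
+For the local experimentation mentioned earlier, a Docker compose file along the following lines would start both the database and the tests. This is a hypothetical sketch; the actual compose file in the example repository may differ.
+
+ `docker-compose.yml`
+{% highlight yaml %}
+{% raw %}
+version: '3'
+services:
+  test_mysql_db:
+    image: mysql:5.7
+    environment:
+      - MYSQL_ROOT_PASSWORD=admin
+      - MYSQL_USER=my_user
+      - MYSQL_PASSWORD=admin
+      - MYSQL_DATABASE=nodejs
+  tests:
+    build: .
+    environment:
+      # The hostname matches the service name, mirroring the service container setup above
+      - MYSQL_HOST=test_mysql_db
+      - MYSQL_USER=my_user
+      - MYSQL_PASSWORD=admin
+      - MYSQL_DATABASE=nodejs
+    depends_on:
+      - test_mysql_db
+    command: npm test
+{% endraw %}
+{% endhighlight %}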
+ +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-postgres.md b/_docs/example-catalog/ci-examples/integration-tests-with-postgres.md new file mode 100644 index 000000000..ee2a41106 --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-postgres.md @@ -0,0 +1,99 @@ +--- +title: "Integration Tests with Postgres" +description: "Launching a PostgreSQL service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/unit-tests-with-postgres/ + - /docs/testing/unit-tests/unit-tests-with-postgres/ +toc: true +--- + +In this example, we will see a NodeJS project that is using PostgreSQL for data storage. For the integration test phase we will launch an instance of PostgreSQL in order to run a simple integration test. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/postgresql-integration-tests.png" +url="/images/examples/integration-tests/postgresql-integration-tests.png" +alt="PostgreSQL integration tests with Codefresh" +caption="PostgreSQL integration tests with Codefresh" +max-width="90%" +%} + +The integration tests look for a PostgreSQL connection at `postgres:5432`. + +## Example NodeJS project + +You can see the example project at [https://github.com/codefreshdemo/example_nodejs_postgres](https://github.com/codefreshdemo/example_nodejs_postgres){:target="\_blank"}. The repository contains the NodeJS source code and the simple integration test. + +You can play with it locally by using Docker compose to launch both the application and the PostgreSQL Database. + +## Create a pipeline with PostgreSQL integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/example_nodejs_postgres" + revision: "master" + git: github + stage: prepare + run_integration_tests: + title: "Running integration tests" + stage: test + image: node:6.9.1 + environment: &test_postgresql_vars + - POSTGRES_USER=user + - POSTGRES_PASSWORD=admin + - POSTGRES_DB=todo + commands: + # PostgreSQL is certainly up at this point + - npm install -g gulp + - npm install + - npm test + services: + composition: + postgres: + image: postgres:11.5 + ports: + - 5432 + environment: *test_postgresql_vars # Same POSTGRES_USER, POSTGRES_PASSWORD etc. + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: postgres:11.5 + commands: + - "pg_isready -h postgres" + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Runs the tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active PostgreSQL instance passing the required environment variables (that match what the test is expecting). 
+ +Notice that both the DB as well as the tests share a set of variables (`POSTGRES_USER`, `POSTGRES_PASSWORD` etc.) and thus we use [YAML anchors]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#using-yaml-anchors-to-avoid-repetition) to avoid duplication. + +Notice that we also use the `readiness` property in the testing phase so that we can verify PostgreSQL is ready and listening, before running the tests. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +[Preload a DB with tests data]({{site.baseurl}}/docs/example-catalog/ci-examples/populate-a-database-with-existing-data/) + + diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-redis.md b/_docs/example-catalog/ci-examples/integration-tests-with-redis.md new file mode 100644 index 000000000..027a57109 --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-redis.md @@ -0,0 +1,129 @@ +--- +title: "Integration Tests with Redis" +description: "Launching a Redis service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/python-redis/ + - /docs/testing/unit-tests/unit-tests-with-redis/ +toc: true +--- + +In this example, we will see a Python project that is using Redis for storing a web counter. For the integration test phase we will launch both the application and an instance of Redis in order to run a simple integration test. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/redis-integration-tests.png" +url="/images/examples/integration-tests/redis-integration-tests.png" +alt="Redis integration tests with Codefresh" +caption="Redis integration tests with Codefresh" +max-width="90%" +%} + +The application will be launched with a hostname `web` while Redis will be at `redis:6379`. + +## Example Python project + +You can see the example project at [https://github.com/codefreshdemo/example_python_redis](https://github.com/codefreshdemo/example_python_redis){:target="\_blank"}. The repository contains the Python source code and a test script. + +You can play with it locally by using Docker compose to launch both the application and the Redis datastore. + +## Create a pipeline with Redis integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." 
+ repo: "codefreshdemo/example_python_redis" + revision: "master" + git: github + stage: prepare + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "python-redis-app" + tag: "latest" + dockerfile: "Dockerfile" + stage: build + build_test_image: + title: "Building Docker Test Image" + type: "build" + image_name: "python-redis-app-tests" + tag: "latest" + dockerfile: "Dockerfile.test" + stage: test + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_test_image}}' + commands: + # Redis and app are certainly up at this point + - sh ./test.sh + services: + composition: + redis: + image: redis:latest + ports: + - 6379 + web: + image: '${{build_app_image}}' + ports: + - 80 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: '${{build_test_image}}' + commands: + - "nslookup redis" + - "nslookup web" + - "nc -z redis 6379" + - "nc -z web 80" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with the application itself through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Builds a helper image that contains `nc` and `curl` that will be used for the integration tests. +1. Runs the test script while launching two [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) (one for the app and one for Redis). + +Notice that we also use the `readiness` property in the testing phase so that we can verify that both the application +as well as Redis are up, before running the tests. + +## Integration test script + +The integration test is very simple. It just uses `curl` to hit the Python endpoint and `grep` to check for a well known string. + + `test.sh` +{% highlight sh %} +#!bin/bash + +if curl web | grep -q 'Visits: '; then + echo "Tests passed!" + exit 0 +else + echo "Tests failed!" + exit 1 +fi +{% endhighlight %} + +Notice that we use the helper image both for running the test (because of `curl`) and for testing the readiness (because of `nc`). In a more complex application these could be two completely different images. 
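+
+If you do split them, only the `services` block needs to change. Here is a rough sketch, assuming a stock `busybox` image is enough for the `nc` readiness probes (this is an illustration, not part of the example repository):
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+    services:
+      composition:
+        redis:
+          image: redis:latest
+          ports:
+            - 6379
+        web:
+          image: '${{build_app_image}}'
+          ports:
+            - 80
+      readiness:
+        timeoutSeconds: 30
+        periodSeconds: 15
+        # A minimal image that provides nc is enough for the readiness checks
+        image: busybox
+        commands:
+          - "nc -z redis 6379"
+          - "nc -z web 80"
+{% endraw %}
+{% endhighlight %}
+
+The test step itself would keep using an image that has `curl` available.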
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) diff --git a/_docs/example-catalog/ci-examples/java.md b/_docs/example-catalog/ci-examples/java.md new file mode 100644 index 000000000..c28cd55c5 --- /dev/null +++ b/_docs/example-catalog/ci-examples/java.md @@ -0,0 +1,15 @@ +--- +title: "Java" +description: "" +group: example-catalog +redirect_from: + - /docs/java/ +toc: true +--- +This section contains Codefresh examples based on Java: + +- [Spring Boot 2 with Maven]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/) +- [Gradle]({{site.baseurl}}/docs/learn-by-example/java/gradle/) +- [Publish a JAR]({{site.baseurl}}/docs/learn-by-example/java/publish-jar/) +- [Spring MVC JDBC Template]({{site.baseurl}}/docs/learn-by-example/java/spring-mvc-jdbc-template/) + diff --git a/_docs/example-catalog/ci-examples/launch-composition.md b/_docs/example-catalog/ci-examples/launch-composition.md new file mode 100644 index 000000000..4b010f39b --- /dev/null +++ b/_docs/example-catalog/ci-examples/launch-composition.md @@ -0,0 +1,85 @@ +--- +title: "Launch Compositions" +description: "Create a dynamic environment to preview your feature" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/launch-composition-1/ +toc: true +--- +Using this repository, we will help you get up to speed with basic functionality such as: building Docker images and launching compositions. +This project uses `Node JS` to build an application which will eventually become a distributable Docker image. + +## Looking around + +In the root of this repository you'll find a file named `codefresh.yml`. This is our [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) and it describes the different steps that comprise our process. Let's quickly review the contents of this file: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +stages: + - prepare + - package + - launch +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: codefreshdemo/cf-example-launch-composition + revision: 'master' + git: github + stage: prepare + build_image: + title: Building Image + type: build + #Important: rename this image to to a valid repository in your registry. For example: myUserName/vote + image_name: example-launch-compose + #Dockerfile location should be relative to the working directory + dockerfile: Dockerfile + tag: master + stage: package + launch_composition: + title: Launch Composition + type: launch-composition + composition: + version: '2' + services: + app: + image: example-launch-compose:master + ports: + - 3000 + environment_name: 'cf-example-launch-composition' + entry_point: app + fail_fast: false + stage: launch +{% endhighlight %} + +The pipeline clones the source code, builds a docker image and then + [creates a preview environment]({{site.baseurl}}/docs/pipelines/steps/launch-composition/) with that image. + + +>**Your environments are limited** + Be aware that the number of environments you can run is limited. 
When using the same environment, make sure that the old environment is terminated before launching the new one. That way you can control the number of environments running in your account.
+
+
+### Example
+
+Just head over to the example [**repository**](https://github.com/codefreshdemo/cf-example-launch-composition){:target="\_blank"} in GitHub and follow the instructions there.
+
+
+Here is the end result:
+
+{% include image.html
+lightbox="true"
+file="/images/examples/composition/launch-composition-example.png"
+url="/images/examples/composition/launch-composition-example.png"
+alt="Launch composition example"
+caption="Launch composition example"
+max-width="90%"
+%}
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples)
+[Unit tests]({{site.baseurl}}/docs/examples/example-catalog/ci-examples/run-integration-tests/)
+[Integration tests]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-database/)
+[Preview environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/)
\ No newline at end of file
diff --git a/_docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file.md b/_docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file.md
new file mode 100644
index 000000000..47996ba42
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file.md
@@ -0,0 +1,59 @@
+---
+title: "Use Docker compose"
+description: "Launch a composition and define a service environment variable using a file"
+group: example-catalog
+sub_group: ci-examples
+redirect_from:
+  - /docs/launching-a-composition-and-passing-a-service-environment-variable-using-a-file/
+toc: true
+old_url: /docs/launching-a-composition-and-passing-a-service-environment-variable-using-a-file
+---
+At times, when launching a composition, you need to pass many environment variables to a specific service.
+To do so, you can use the docker-compose `env_file` field on any service, and use files from the current working directory from which the composition is being launched.
+This works for both `composition` and `launch-composition` step types.
+
+>**Note**:
+  When launching a composition directly from the Compositions view, using `env_file` does not work, as the composition is launched in an empty working directory.
+  Consider moving the composition launch into a regular pipeline, which gives you the ability to use files from your cloned repository.
+
+
+## Examples
+Compositions are launched within a working directory, which is the cloned repository by default.
+This means that you can always reference an `env_file` just as you would reference a `docker-compose` file.
+ + `Inline Composition` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + + inline_composition: + title: Launch inline composition + type: launch-composition + environment_name: 'environment name' + composition: + version: '3' + services: + service: + image: alpine + env_file: ./env-file +{% endraw %} +{% endhighlight %} + + + `Composition from file` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + + composition_from_file: + title: Launch composition from file + type: launch-composition + composition: './docker-compose.yml' + environment_name: 'environment name' +{% endraw %} +{% endhighlight %} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/lets-chat.md b/_docs/example-catalog/ci-examples/lets-chat.md new file mode 100644 index 000000000..b14e965ba --- /dev/null +++ b/_docs/example-catalog/ci-examples/lets-chat.md @@ -0,0 +1,121 @@ +--- +title: "Let's Chat example" +description: "Create Docker images for Node/Express.js applications" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/lets-chat/ +toc: true +--- + +Let’s Chat is self-hosted chat app for small to big teams. + +## The example Node.JS project + +You can see the example project at [https://github.com/codefreshdemo/demochat](https://github.com/codefreshdemo/demochat){:target="\_blank"}. The repository contains the source code of the project along with two Dockerfiles (one for unit tests) and various docker-compose configurations + +The project requires a Mongo Database to work and by default it uses port 5000 for its web interface. + +## Create a CI pipeline for Node.js + +Creating a CI/CD pipeline for NodeJS is very easy, because Codefresh has built-in steps for creating Docker images and running commands with containers. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/nodejs/nodejs-pipeline.png" +url="/images/learn-by-example/nodejs/nodejs-pipeline.png" +alt="Building and testing a Node.js application" +caption="Building and testing a Node.js application" +max-width="100%" +%} + +Here is the [full pipeline](https://github.com/codefreshdemo/demochat/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. 
+ + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "unit" + - "build" + - "integration" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefreshdemo/demochat" + revision: "master" + stage: "clone" + + build_dev_image: + title: "Building Dev image" + type: "build" + image_name: "codefreshdemo/demochat" + working_directory: "${{clone}}" + tag: "dev" + dockerfile: "Dockerfile.dev" + stage: "unit" + + test: + title: "Running test" + type: "freestyle" + image: ${{build_dev_image}} + working_directory: /root/demochat + commands: + - 'npm run test' + stage: "unit" + + build_image: + title: "Building App image" + type: "build" + image_name: "codefreshdemo/demochat" + working_directory: "${{clone}}" + tag: "dev" + dockerfile: "Dockerfile" + stage: "build" + + integration_step: + type: composition + stage: 'integration' + composition: + version: '2' + services: + app: + image: ${{build_image}} + links: + - mongo + ports: + - 5000 + mongo: + image: mongo + composition-candidates: + main: + image: nhoag/curl + command: bash -c "sleep 30 && curl http://app:5000/" | echo 'works' + +{% endraw %} +{% endhighlight %} + +> Note that you should change `codefreshdemo` in the clone step with your own Github account if you fork the repository. Also in both build steps you should change `codefreshdemo/demochat` with your own image name that is compliant to your Dockerhub account or other connected registry. + +This pipeline has 4 [stages]({{site.baseurl}}/docs/pipelines/stages/) and performs the following: + + 1. Clones the source code using the [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step + 1. Builds a Docker image for unit tests with the [build step]({{site.baseurl}}/docs/pipelines/steps/build/) + 1. Runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) in the Docker image that was just created with a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + 1. Building a Docker image for the final application + 1. Runs [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) using a [composition step]({{site.baseurl}}/docs/pipelines/steps/composition/) + +If you run the pipeline multiple times, you will also see the [Codefresh caching mechanisms]({{site.baseurl}}/docs/pipelines/pipeline-caching/) in action for faster build times. + +## Related articles +[Voting app example]({{site.baseurl}}/docs/example-catalog/ci-examples/voting-app/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + diff --git a/_docs/example-catalog/ci-examples/mobile.md b/_docs/example-catalog/ci-examples/mobile.md new file mode 100644 index 000000000..e0c6f991d --- /dev/null +++ b/_docs/example-catalog/ci-examples/mobile.md @@ -0,0 +1,10 @@ +--- +title: "Mobile Apps" +description: "How to build Mobile applications with Codefresh CI/CD pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- +This section contains Codefresh examples for Mobile application. 
+ +- [Android]({{site.baseurl}}/docs/learn-by-example/mobile/android/) diff --git a/_docs/example-catalog/ci-examples/nodejs.md b/_docs/example-catalog/ci-examples/nodejs.md new file mode 100644 index 000000000..4ed04ccde --- /dev/null +++ b/_docs/example-catalog/ci-examples/nodejs.md @@ -0,0 +1,15 @@ +--- +title: "Node.js" +description: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/nodejs/ +toc: true +--- + +This section contains Codefresh examples based on Node.js: + +- [Let's Chat]({{site.baseurl}}/docs/learn-by-example/nodejs/lets-chat/) - Express.js + Mongo Example +- [Voting app]({{site.baseurl}}/docs/learn-by-example/nodejs/voting-app/) - Microservices app with multiple programming languages +- [React JS app]({{site.baseurl}}/docs/learn-by-example/nodejs/react/) - React.JS + multi stage Docker build example \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/non-git-checkout.md b/_docs/example-catalog/ci-examples/non-git-checkout.md new file mode 100644 index 000000000..5f32d93b4 --- /dev/null +++ b/_docs/example-catalog/ci-examples/non-git-checkout.md @@ -0,0 +1,100 @@ +--- +title: "Checking out from other source control systems" +description: "Work with non-git repositories" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh has [native Git support]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/), but you can still use any other version control system such as SVN, CVS, hg, etc. + +The only requirement is that you find or create a Docker image that contains the client for that source control system and then use a +[freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) to run it. + +## Checking out Subversion code + +There is already a public [Docker image with the svn client](https://hub.docker.com/r/jgsqware/svn-client/){:target="\_blank"}, so it is very easy to run it in a Codefresh pipeline. + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomCheckout: + title: Performing SVN checkout + image: jgsqware/svn-client + commands: + - pwd + - rm -rf audacity-svn + - svn checkout https://svn.code.sf.net/p/audacity/svn/ audacity-svn + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l /codefresh/volume/' +{% endraw %} +{% endhighlight %} + +Notice the `rm` command before the clone step. This makes sure that every time the pipeline runs, the `svn checkout` step is implemented in an empty directory. + + + +## Checking out Mercurial or CVS Code + +It is very simple to use any other source control system in a Codefresh pipeline. The easiest way is to just call the respective executable. Here are two examples: + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myHgStep: + title: Using HG + image: alpine:latest + commands: + - apk add --no-cache mercurial + - hg --version + - hg clone https://www.mercurial-scm.org/repo/hg mercurial-repo + myCvsStep: + title: Using CVS + image: alpine:latest + commands: + - apk add --no-cache cvs + - cvs --version + - cvs -d :pserver:anonymous@cvs.project-open.net:/home/cvsroot checkout -c +{% endraw %} +{% endhighlight %} + +A much faster way is to create your own Dockerfile that includes the client you need and then define that image directly in the freestyle step. 
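+
+For example, the Mercurial step above could then shrink to a single clone command. This is only a sketch; `my-registry/hg-client:1.0` is a hypothetical image that you would build and push yourself with the Mercurial client preinstalled.
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  myHgStep:
+    title: Using a prebuilt HG image
+    # Hypothetical prebuilt image containing the hg client
+    image: my-registry/hg-client:1.0
+    commands:
+      - hg --version
+      - hg clone https://www.mercurial-scm.org/repo/hg mercurial-repo
+{% endraw %}
+{% endhighlight %}
+
+This avoids re-installing the client with `apk` on every pipeline run.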
+ + +## Checking out Perforce code + +Codefresh has created a [Perforce plugin](https://hub.docker.com/r/codefresh/cf-p4-plugin/tags){:target="\_blank"} which packs the p4 client into a Docker image to be used from Codefresh pipelines: + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomCheckout: + title: Performing Perforce checkout + image: codefresh/cf-p4-plugin:latest + commands: + - mkdir -p /codefresh/volume/p4repo/ + - p4 client -o | grep -v '#' | sed '/Root:/c\Root:/codefresh/volume/p4repo/' | p4 client -i + - cd /codefresh/volume/p4repo/ && p4 rec + - 'ls -la' + environment: + - P4PORT=serveradress:serverport + - P4CLIENT=clientname + - P4USER=username + - P4PASSWD=password +{% endraw %} +{% endhighlight %} + +Define the environment variables in [Codefresh shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Native Git checkout]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) +[Running custom git commands]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout-custom/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) diff --git a/_docs/example-catalog/ci-examples/php.md b/_docs/example-catalog/ci-examples/php.md new file mode 100644 index 000000000..b447f0d59 --- /dev/null +++ b/_docs/example-catalog/ci-examples/php.md @@ -0,0 +1,135 @@ +--- +title: "Create a Docker image for Php" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with Php projects using any of the popular frameworks (Laravel, Symphony, CakePHp etc.) + +## The example php project + +You can see the example project at [https://github.com/codefresh-contrib/php-composer-sample-app](https://github.com/codefresh-contrib/php-composer-sample-app){:target="\_blank"}. The repository contains a simple Php project that uses [composer](https://getcomposer.org/) as a package manager. + +The dockerfile uses [multi-stage builds](https://docs.docker.com/develop/develop-images/multistage-build/){:target="\_blank"} to minimize the size of the docker image. + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM composer:1.9.3 as vendor + +WORKDIR /tmp/ + +COPY composer.json composer.json +COPY composer.lock composer.lock + +RUN composer install \ + --ignore-platform-reqs \ + --no-interaction \ + --no-plugins \ + --no-scripts \ + --prefer-dist + + +FROM php:7.2-apache-stretch + +COPY . /var/www/html +COPY --from=vendor /tmp/vendor/ /var/www/html/vendor/ +{% endraw %} +{% endhighlight %} + + +## Create a Docker image for Php project + +An [example pipeline](https://github.com/codefresh-contrib/php-composer-sample-app/blob/master/codefresh.yml){:target="\_blank"} is also offered in the git repository. +It contains just two [steps]({{site.baseurl}}/docs/pipelines/steps/): + +* A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to fetch the code +* A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create a Docker image + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'codefresh-contrib/php-composer-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-php-image + working_directory: ./ + tag: master + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Once you run this pipeline Codefresh will create a Docker image for the Php application: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/php-cicd-pipeline.png" +url="/images/learn-by-example/php/php-cicd-pipeline.png" +alt="Creating a docker image for php" +caption="Creating a docker image for php" +max-width="80%" +%} + +Notice that all dependencies are downloaded when the dockerfile is created. + + + + +## Launch Docker images + +Codefresh can also launch Docker images (using Docker swarm behind the scenes). With each Codefresh account you get access to a limited number of Docker environments that can host any Docker image or Docker compose file. + +First find your images in the [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images). + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/launch-docker-image.png" +url="/images/learn-by-example/php/launch-docker-image.png" +alt="Launching a Docker image" +caption="Launching a Docker image" +max-width="80%" +%} + +Click on the launch button and a new pipeline will run for deployment: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/test-environment-url.png" +url="/images/learn-by-example/php/test-environment-url.png" +alt="Getting the environment url" +caption="Getting the environment url" +max-width="80%" +%} + +Notice that the pipeline logs show the dynamic URL of the application. Simply visit it with your browser +and you will see the result. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/test-environment.png" +url="/images/learn-by-example/php/test-environment.png" +alt="Application preview" +caption="Application preview" +max-width="80%" +%} + +Notice that these environments are only for testing and previewing your application as it is developed. They are **NOT** for production purposes. + + + +## Related articles + +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/ci-examples/populate-a-database-with-existing-data.md b/_docs/example-catalog/ci-examples/populate-a-database-with-existing-data.md new file mode 100644 index 000000000..14f27b147 --- /dev/null +++ b/_docs/example-catalog/ci-examples/populate-a-database-with-existing-data.md @@ -0,0 +1,153 @@ +--- +title: "Populate database with existing data" +description: "Preload test data before integration tests" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/populate-a-database-with-existing-data-copied/ +toc: true +old_url: /docs/populate-a-database-with-existing-data-copied +was_hidden: true +--- +In another example we saw how to run [integration tests with a database]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) such as PostgreSQL. Sometimes however, the integration tests require the database to already have some test data beforehand. 
With Codefresh you can use the [setup block]({{site.baseurl}}/docs/pipelines/service-containers/#preloading-data-to-databases) in service containers to preload data to a database. + + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/preload-data-to-db.png" +url="/images/examples/integration-tests/preload-data-to-db.png" +alt="Preloading test data to a DB" +caption="Preloading test data to a DB" +max-width="90%" +%} + +In this pipeline the database is populated with data from an SQL file. + +## Example PostgreSQL project + +You can see the example project at [https://github.com/codefresh-contrib/preload-db-integration-tests](https://github.com/codefresh-contrib/preload-db-integration-tests){:target="\_blank"}. The repository contains a simple integration test and an SQL file that inserts test data. + +The SQL file creates a single table in the database: + + `preload.sql` +{% highlight sql %} +{% raw %} +CREATE TABLE link ( + ID serial PRIMARY KEY, + url VARCHAR (255) NOT NULL, + name VARCHAR (255) NOT NULL, + description VARCHAR (255), + rel VARCHAR (50) +); + +INSERT INTO link (url, name) +VALUES + ('http://www.google.com','Google'), + ('http://www.azure.microsoft.com','Azure'), + ('http://www.codefresh.io','Codefresh'); +{% endraw %} +{% endhighlight %} + + +To work with the project locally, you need to have `docker`, `golang` and `postgres-client` installed on your workstation first. + +``` +$ docker run -p 5432:5432 postgres:11.5 +``` + +Then open another terminal and load the test data: + +``` +$ psql -h localhost -U postgres < testdata/preload.sql +``` + +A Postgres instance is now running at `localhost:5432` and you can run the tests with: + +``` +$ go test -v +``` + + +## Create a pipeline the preloads test data to PostgreSQL + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: +- prepare +- package +- test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefresh-contrib/preload-db-integration-tests" + revision: "master" + title: "Checking out source code" + git: github + stage: prepare + package_my_app: + stage: package + image: 'golang:1.13' + title: "Compile code" + commands: + - 'go build' + run_my_db_tests: + stage: test + image: 'golang:1.13' + title: "Running integration tests" + commands: + - 'go test -v' + environment: + - POSTGRES_HOST=my_postgresql_db + services: + composition: + my_postgresql_db: + image: postgres:11.5 + ports: + - 5432 + readiness: + timeoutSeconds: 30 + initialDelaySeconds: 10 + periodSeconds: 15 + image: 'postgres:11.5' + commands: + - "pg_isready -h my_postgresql_db -U postgres" + setup: + image: 'postgres:11.5' + commands: + - "psql -h my_postgresql_db -U postgres < /codefresh/volume/preload-db-integration-tests/testdata/preload.sql" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Compiles the code that runs `go build` through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Runs the tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active PostgreSQL instance. Before tests are run, we launch another container with the `psql` executable to load database data. + + +> In this simple example, we use `psql` to preload the database. 
In a production application you might also use dedicated database tools such as [liquibase](https://hub.docker.com/r/liquibase/liquibase){:target="\_blank"} or [flyway](https://hub.docker.com/r/flyway/flyway){:target="\_blank"}, or other command-line tools that communicate with your database.
+
+Notice that we also use the `readiness` property in the testing phase so that we can verify PostgreSQL is ready and listening, before running the tests. The exact order of events is:
+
+1. Codefresh launches `postgres:11.5` at port 5432.
+1. It then launches another container in the same network with `pg_isready` in order to wait for the DB to be up.
+1. Then it launches a third container with `psql` to preload data.
+1. Finally, it launches a container with `golang:1.13` to run the actual tests.
+
+All containers are discarded after the pipeline has finished.
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples)
+[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/)
+[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/)
+[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/)
+[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/)
+[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/)
+
+
+
diff --git a/_docs/example-catalog/ci-examples/publish-jar.md b/_docs/example-catalog/ci-examples/publish-jar.md
new file mode 100644
index 000000000..47add3699
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/publish-jar.md
@@ -0,0 +1,116 @@
+---
+title: "Publish Jar"
+description: "How to upload a JAR file to Nexus or Artifactory"
+excerpt: ""
+group: example-catalog
+sub_group: ci-examples
+toc: true
+---
+
+Even though Codefresh has great support for containers, it can still be used for traditional JAR uploads of libraries or applications that are not dockerized yet. In this example we will compile a JAR and upload it to Nexus. The process is the same for Artifactory or any other package manager.
+
+For a Java application with Docker, see the [Gradle]({{site.baseurl}}/docs/learn-by-example/java/gradle/) or
+ [Maven example]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/).
+
+## The example Java library project
+
+You can see the example project at [https://github.com/codefresh-contrib/plain-jar-sample-lib](https://github.com/codefresh-contrib/plain-jar-sample-lib). The repository contains a simple Java library built with Maven with the following goals:
+
+* `mvn package` creates a JAR file of the library. It also runs unit tests.
+* `mvn deploy` uploads the JAR to a package manager such as Nexus or Artifactory.
+
+We use Nexus for this example. To upload the JAR manually, first edit the `pom.xml` with the URL of the package manager. The project also includes a [settings.xml](https://github.com/codefresh-contrib/plain-jar-sample-lib/blob/master/settings.xml) with parameterized credentials. 
+
+{% include image.html 
+lightbox="true" 
+file="/images/learn-by-example/java/nexus-browser.png" 
+url="/images/learn-by-example/java/nexus-browser.png" 
+alt="The Nexus package manager" 
+caption="The Nexus package manager" 
+max-width="80%" 
+%}
+
+From your workstation you can upload the JAR manually with:
+
+
+```
+mvn -s settings.xml -Dserver.password=my-nexus-pass -Dserver.username=my-nexus-user deploy
+```
+If you then visit Nexus you should see your JAR file in the snapshots repository.
+
+## Create a CI pipeline for publishing a JAR file
+
+[Create a new pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) in Codefresh and define as parameters your Nexus credentials. You could also use [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) or any other credential mechanism you already use in your other pipelines.
+
+{% include image.html 
+lightbox="true" 
+file="/images/learn-by-example/java/nexus-credentials.png" 
+url="/images/learn-by-example/java/nexus-credentials.png" 
+alt="Parameters for Nexus" 
+caption="Parameters for Nexus" 
+max-width="50%" 
+%}
+
+Then copy/paste the [Codefresh YAML file](https://github.com/codefresh-contrib/plain-jar-sample-lib/blob/master/codefresh.yml) in the pipeline editor.
+Here are the full contents of the file:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  main_clone:
+    title: Cloning main repository...
+    type: git-clone
+    repo: 'codefresh-contrib/plain-jar-sample-lib'
+    revision: master
+    git: github
+  publish_jar:
+    title: Upload to nexus
+    image: 'maven:3.5.2-jdk-8-alpine'
+    commands:
+    - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -s settings.xml -Dserver.password=${{NEXUS_PASS}} -Dserver.username=${{NEXUS_USER}} deploy
+{% endraw %}
+{% endhighlight %}
+
+The pipeline starts by checking out the code using a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). The next step is a [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) one that packages the JAR file and uploads it to Nexus. We also use the [Codefresh volume for caching]({{site.baseurl}}/docs/pipelines/pipeline-caching/#traditional-build-caching).
+
+You can define the version of Maven/JDK you want to use by picking the appropriate image from Dockerhub, or using any of your own images (even from [external registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/)).
+
+Note the use of the two user-defined environment variables passed to `server.password` and `server.username`. You will need to define those yourself. See the documentation on [User Provided Variables]({{site.baseurl}}/docs/pipelines/variables/#user-provided-variables).
+
+{% include image.html 
+lightbox="true" 
+file="/images/learn-by-example/java/publish-jar-pipeline.png" 
+url="/images/learn-by-example/java/publish-jar-pipeline.png" 
+alt="Publish JAR pipeline" 
+caption="Publish JAR pipeline" 
+max-width="100%" 
+%}
+
+Once the pipeline has finished you should see the JAR file in the Nexus browser UI.
+
+{% include image.html 
+lightbox="true" 
+file="/images/learn-by-example/java/nexus-upload.png" 
+url="/images/learn-by-example/java/nexus-upload.png" 
+alt="Upload finished" 
+caption="Upload finished" 
+max-width="70%" 
+%}
+
+You can use the same pipeline for Artifactory or any other compliant Java package registry. 
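+
+If you prefer to run the unit tests in a separate step before uploading (as in the other Java examples of this catalog), a possible sketch is shown below. The step names are arbitrary, and the Maven cache location on the shared volume follows the same convention used in the Spring Boot 2 example:
+
+ `codefresh.yml` (extract)
+{% highlight yaml %}
+{% raw %}
+  run_unit_tests:
+    title: Compile/Unit test
+    image: 'maven:3.5.2-jdk-8-alpine'
+    commands:
+    # Reuse the shared Codefresh volume for the local Maven repository so dependencies are cached between builds
+    - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package
+  publish_jar:
+    title: Upload to nexus
+    image: 'maven:3.5.2-jdk-8-alpine'
+    commands:
+    # -DskipTests avoids running the tests a second time during the deploy phase
+    - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -s settings.xml -Dserver.password=${{NEXUS_PASS}} -Dserver.username=${{NEXUS_USER}} -DskipTests deploy
+{% endraw %}
+{% endhighlight %}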
+ + +## Related articles +[Gradle example]({{site.baseurl}}/docs/example-catalog/ci-examples/java/gradle/) +[Spring boot example]({{site.baseurl}}/docs//example-catalog/ci-examples/spring-boot-2/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + + + + diff --git a/_docs/example-catalog/ci-examples/python.md b/_docs/example-catalog/ci-examples/python.md new file mode 100644 index 000000000..d80cb991c --- /dev/null +++ b/_docs/example-catalog/ci-examples/python.md @@ -0,0 +1,11 @@ +--- +title: "Python" +description: "" +group: example-catalog +redirect_from: + - /docs/python/ +toc: true +--- +This section contains Codefresh examples based on Python. +- [Voting app]({{ site.baseurl }}/docs/learn-by-example/python/voting-app/) +- [Django]({{ site.baseurl }}/docs/learn-by-example/python/django/) diff --git a/_docs/example-catalog/ci-examples/react.md b/_docs/example-catalog/ci-examples/react.md new file mode 100644 index 000000000..0cb0466ec --- /dev/null +++ b/_docs/example-catalog/ci-examples/react.md @@ -0,0 +1,172 @@ +--- +title: "React example with Yarn" +description: "Create Docker images for React applications" +group: example-catalog +sub_group: nodejs +toc: true +--- + +Codefresh can work with React projects as with any [Node.js project]({{site.baseurl}}/docs/learn-by-example/nodejs/). + +## The example React project + +You can see the example project at [https://github.com/codefresh-contrib/react-sample-app](https://github.com/codefresh-contrib/react-sample-app){:target:"\_blank"}. The repository contains a React starter project with the following tasks: + +* `yarn test` runs unit tests. +* `yarn start` to start the application locally. +* `yarn build` to create a production deployment. + +Once launched the application presents a simple page at localhost:3000. + +## React and Docker (multi-stage builds) + +The easiest way to build a React.JS application is with [multi-stage builds](https://blog.docker.com/2017/07/multi-stage-builds/){:target:"\_blank"}. With multi-stage builds a Docker build can use one base image for packaging/unit tests and a different one that will hold the runtime of the application. This makes the final image more secure and smaller in size (as it does not contain any development/debugging tools). + +In the case of React, you can use a base image that has Node and all testing utilities, while the final image has your server (e.g. nginx) with the static content and nothing else. + +The example project is actually using multi-stage builds by default. + +Here is the multi-stage Dockerfile: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM node:8.16 as build-deps +WORKDIR /usr/src/app +COPY package.json yarn.lock ./ +RUN yarn +COPY . ./ +RUN yarn build + +FROM nginx:1.12-alpine +COPY --from=build-deps /usr/src/app/build /usr/share/nginx/html +EXPOSE 80 +CMD ["nginx", "-g", "daemon off;"] +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. Starts from the Node/Yarn image +1. Copies the dependencies inside the container +1. Copies the source code and creates all static files +1. Discards the Node.js image with all the JavaScript libraries +1. 
Starts again from the nginx image and copies **static build result** created before + +The resulting is very small, as it contains only packaged/minified files. + +## Create a CI pipeline for React.js (Docker build) + +Creating a CI/CD pipeline for React is very easy, because Codefresh can run any [node image](https://hub.docker.com/_/node/){:target:"\_blank"} that you wish. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/nodejs/react-pipeline-docker.png" +url="/images/learn-by-example/nodejs/react-pipeline-docker.png" +alt="Creating a Docker image for react.js" +caption="Creating a Docker image for react.js" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh.yml){:target:"\_blank"} that creates the Docker image after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/react-sample-app' + revision: master + git: github + MyUnitTests: + title: Unit test + stage: test + image: node:8.16 + commands: + - yarn install + - yarn test + environment: + - CI=true + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: react-sample-app + working_directory: ./ + tag: 'with-nginx' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, runs unit tests and finally creates a Docker image. Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. + + +## Building a React.Js application without Docker + +If your application is not dockerized yet, you can still create a pipeline that runs any command that you would run locally. You can also choose which Node version is used for each step of the pipeline by defining a different docker image for each step. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/nodejs/react-pipeline-build.png" +url="/images/learn-by-example/nodejs/react-pipeline-build.png" +alt="Building a Reach.js application" +caption="Building a Reach.js application" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/react-sample-app/blob/master/codefresh-only-build.yml){:target:"\_blank"} that creates a production deployment of all files. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/react-sample-app' + revision: master + git: github + MyUnitTests: + title: Unit test + stage: test + image: node:11.0 + commands: + - yarn install + - yarn test + environment: + - CI=true + MyReactBuild: + title: Packaging application + stage: build + image: node:8.16 + commands: + - yarn build +{% endraw %} +{% endhighlight %} + +Notice that for demonstration purposes we uses node 11 for the tests, and node 8 for the packaging. Normally you should use the same version of node/Yarn for all your steps, but Codefresh pipelines are flexible on version of tools. + +Even when you don't create a Docker image, Codefresh still caches your workspace volume. This means that `node_modules` are downloaded only once. All subsequent builds will be much faster. 
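+
+If you also want the Yarn package cache itself (and not only `node_modules`) to survive between builds, you can point it to the shared volume, following the same pattern used for the Maven and Cargo caches in the other examples. The following is only a sketch and assumes Yarn 1.x, which honors the `YARN_CACHE_FOLDER` environment variable; it adapts the unit test step from the pipeline above:
+
+ `codefresh.yml` (extract)
+{% highlight yaml %}
+{% raw %}
+  MyUnitTests:
+    title: Unit test
+    stage: test
+    image: node:11.0
+    environment:
+      # Keep the Yarn cache on the shared Codefresh volume so it is reused by subsequent builds
+      - YARN_CACHE_FOLDER=/codefresh/volume/yarn-cache
+      - CI=true
+    commands:
+      - yarn install
+      - yarn test
+{% endraw %}
+{% endhighlight %}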
+ +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/ruby.md b/_docs/example-catalog/ci-examples/ruby.md new file mode 100644 index 000000000..0758068e2 --- /dev/null +++ b/_docs/example-catalog/ci-examples/ruby.md @@ -0,0 +1,183 @@ +--- +title: "Ruby" +description: "How to build a Ruby On Rails project in Codefresh" +group: example-catalog +sub_group: ci-examples +toc: true +--- +Ruby on Rails is a very popular development framework that combines ease of use and a great amount of programming languages. In Codefresh, ROR projects behave like any other web application. You can easily build them, run [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) and launch them on [demo environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/). + +The example application is located at [https://github.com/codefresh-contrib/ruby-on-rails-sample-app](https://github.com/codefresh-contrib/ruby-on-rails-sample-app){:target:"\_blank"}. + + + +## Dockerize your Ruby on Rails project + +The first step should be to write a [Dockerfile](https://github.com/codefresh-contrib/ruby-on-rails-sample-app/blob/master/Dockerfile){:target:"\_blank"} for your Rails project. As an example we will use the following: + + + +`Dockerfile` +{% highlight docker %} +FROM ruby:2.3.1-slim + +RUN apt-get update && \ + apt-get install -y build-essential libcurl4-openssl-dev libxml2-dev libsqlite3-dev libpq-dev nodejs postgresql-client sqlite3 --no-install-recommends && \ + apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# throw errors if Gemfile has been modified since Gemfile.lock +RUN bundle config --global frozen 1 + +ENV APP_PATH /usr/src/app + +RUN mkdir -p $APP_PATH + +COPY Gemfile $APP_PATH +COPY Gemfile.lock $APP_PATH + +WORKDIR $APP_PATH + +RUN bundle install + +COPY . $APP_PATH + +ENV RAILS_ENV development + +RUN bin/rake db:migrate + +RUN bin/rake assets:precompile + +EXPOSE 3000 + +CMD ["bundle", "exec", "rails", "server", "-b", "0.0.0.0"] + +{% endhighlight %} + +Notice the order of commands and especially the fact that we copy the `Gemfile` on its own first, so that we take advantage of the Docker layer caching. + +>Codefresh also supports multi-stage docker builds. You can use one parent docker image for preparing your gem modules and another one for actually deployment the application. + +Once you have a Dockerfile, [creating a pipeline in Codefresh]({{site.baseurl}}/docs/pipelines/pipelines/) is very easy either from the GUI or with the yaml syntax. + +## Simple pipeline with Docker image and unit tests + +A very simple pipeline is one that has only two steps: + +1. Build the docker image +1. Run the tests inside the docker image that was just build + +Here is the example [codefresh.yml](https://github.com/codefresh-contrib/ruby-on-rails-sample-app/blob/master/codefresh.yml){:target:"\_blank"} file. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... 
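+    # Check out the example Rails repository before the build step runs.
+    # The 'git' attribute below refers to the name of the Git provider integration configured in your Codefresh account.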
+ type: git-clone + repo: 'codefresh-contrib/ruby-on-rails-sample-app' + revision: master + git: github + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: ruby-on-rails-sample-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + RunningUnitTests: + title: Running Unit Tests + image: '${{BuildingDockerImage}}' + commands: + - rails db:migrate + - rails test +{% endraw %} +{% endhighlight %} + +The first step is a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) named `BuildingDockerImage`. It reads the Dockerfile and creates a Docker image out of it. The second step is a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) called `RunningUnitTests`. It uses the image mentioned in the first step and executes custom commands inside it. + + +## Inspecting your Docker image + +You can see all your latest [Docker artifacts]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images) by selecting *Images* from the left sidebar. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/images.png" +url="/images/learn-by-example/ruby/images.png" +alt="Codefresh built-in Registry" +caption="Codefresh built-in Registry" +max-width="80%" +%} + +You can click on the image and get extra details. One of the tabs contains a visual explanation of the layers contained in the image. This view can be helpful when you are trying to make your Docker images smaller (which is a recommended practice) + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/layers.png" +url="/images/learn-by-example/ruby/layers.png" +alt="Ruby On Rails image filesystem layers" +caption="Ruby On Rails Image filesystem layers" +max-width="70%" +%} + +In Codefresh you can also use any other [external registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) such as Dockerhub, Azure, Google etc. + + +## Previewing the Ruby on Rails application in a Demo environment + +Codefresh has the unique capability of launching Docker images within its infrastructure for a quick demonstration (e.g. to customers and colleagues). + +In the example Rails repository, the default development "environment" is self-contained (it uses sqlite for a database). This makes it very easy to preview. + +Launch the environment by clicking at the rocket icon in the images view. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/launch.png" +url="/images/learn-by-example/ruby/launch.png" +alt="Launching a demo environment" +caption="Launching a demo environment" +max-width="50%" +%} + +A new build will start. Once it is complete your new environment will be created. You can inspect it by clicking in the *Compositions* menu on the left sidebar and then clicking *Running Compositions*. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/environment.png" +url="/images/learn-by-example/ruby/environment.png" +alt="Inspecting a demo environment" +caption="Inspecting a demo environment" +max-width="70%" +%} + +Click the *Open App* icon on the right and your browser will open a new tab with the environment. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/preview.png" +url="/images/learn-by-example/ruby/preview.png" +alt="Previewing a demo environment" +caption="Previewing a demo environment" +max-width="50%" +%} + + +You can share this link with other people in your team. 
+ +>Demo environments are not intended for production purposes. Use them only for quick feedback. They also shutdown automatically after a period of inactivity. + + + +## Related articles +[Introduction to Pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[On demand environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) + + + diff --git a/_docs/example-catalog/ci-examples/run-integration-tests.md b/_docs/example-catalog/ci-examples/run-integration-tests.md new file mode 100644 index 000000000..9bbbbdc02 --- /dev/null +++ b/_docs/example-catalog/ci-examples/run-integration-tests.md @@ -0,0 +1,102 @@ +--- +title: "Run integration tests" +description: "Launch separate App and test containers" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/run-integration-tests/ +toc: true +--- +In this example, we will see a Java/Tomcat project using JUnit for unit tests and Spock for integration tests. For the integration test phase, we will launch both the application and the tests in order to run the integration tests against a real web instance (without mocking). + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/integration-tests.png" +url="/images/examples/integration-tests/integration-tests.png" +alt="Integration tests with Codefresh" +caption="Integration tests with Codefresh" +max-width="90%" +%} + +The integration tests will look at the application instance at `app:8080`. + +## Example Java/Tomcat/Spring project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-integration-tests](https://github.com/codefreshdemo/cf-example-integration-tests){:target:"\_blank"}. The repository contains the Java source code and some integration tests. + +You can play with it locally by using Docker compose to launch both the application and the tests. + +## Create a pipeline with separate integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/cf-example-integration-tests" + revision: "master" + git: github + stage: prepare + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "my-spring-app" + tag: "master" + dockerfile: "Dockerfile" + stage: build + build_test_image: + title: "Building Docker Test Image" + type: "build" + image_name: "my-junit-spock-tests" + tag: "master" + dockerfile: "Dockerfile.testing" + stage: test + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_test_image}}' + commands: + # Tomcat is certainly up at this point + - mvn verify -Dserver.host=app + services: + composition: + app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://app:8080/wizard/" + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with only Tomcat and the application WAR through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. 
Builds a helper image that contains the source code and Maven to run integration tests. +1. Runs the `mvn verify` command in the helper image while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) with the Tomcat/Java image. + +Notice that we also use the `readiness` property in the testing phase to verify that the application +is actually up, before running the tests. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/run-unit-tests.md b/_docs/example-catalog/ci-examples/run-unit-tests.md new file mode 100644 index 000000000..360da67ea --- /dev/null +++ b/_docs/example-catalog/ci-examples/run-unit-tests.md @@ -0,0 +1,106 @@ +--- +title: "Run unit tests" +description: "Running unit tests in Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/run-unit-tests/ +toc: true +--- + +As explained in [unit tests]({{site.baseurl}}/docs/testing/unit-tests/), Codefresh supports several ways of running unit tests. The most common scenarios use an existing Docker Hub image (common with compiled languages such as Java and Go), or the application image itself (common with languages such as JavaScript/Python/Ruby/PHP). + +In this example, we will see both ways using two different applications in a single pipeline. + +{% include image.html +lightbox="true" +file="/images/examples/unit-tests/unit-tests-pipeline.png" +url="/images/examples/unit-tests/unit-tests-pipeline.png" +alt="Unit tests with Codefresh" +caption="Unit tests with Codefresh" +max-width="90%" +%} + +In the first case, we run unit tests *before* creating the application docker image. In the second case, we run the unit tests +*inside* the application Docker image. + +## Example Python/Go project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-unit-test](https://github.com/codefreshdemo/cf-example-unit-test){:target="\_blank"}. The repository contains two applications (Python and Go) with their respective unit tests. + +You can play with it locally by using Docker commands to package the applications. + +## Create a pipeline with unit tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - 'Microservice A' + - 'Microservice B' +steps: + main_clone: + title: Cloning main repository... 
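+    # The clone step places the source code on the shared volume (/codefresh/volume),
+    # so both microservices below are built and tested from the same checkout.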
+ type: git-clone + repo: 'codefreshdemo/cf-example-unit-test' + revision: 'master' + git: github + stage: prepare + run_my_tests_before_build: + title: Running Unit tests directly + stage: 'Microservice A' + image: golang:1.12 + working_directory: './golang-app-A' + commands: + - go test -v + build_after_my_tests: + title: Building Go Docker Image + type: build + stage: 'Microservice A' + image_name: my-go-image + working_directory: './golang-app-A' + tag: 'master' + dockerfile: Dockerfile + build_before_my_tests: + title: Building Python Docker Image + type: build + stage: 'Microservice B' + image_name: my-python-image + working_directory: './python-app-B' + tag: 'master' + dockerfile: Dockerfile + run_my_tests_inside_image: + title: Running Unit tests inside App image + stage: 'Microservice B' + image: ${{build_before_my_tests}} + working_directory: '/app' + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Runs unit test for the GO application using the Dockerhub image `golang:1.12`. +1. Builds the Docker image for the Go application through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Builds the Docker image for the Python application. +1. Runs unit tests for the Python application using as runtime context the application image that was just created. + + +In the second case, the tests run in the context of `build_before_my_tests` which is the name of the step that creates the Docker image for Python. Read more about [context variables]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables). + +We generally recommend the first approach, so that your production Docker image does not contain any unit testing libraries or frameworks, but there is no right or wrong choice regarding the way you run unit tests. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/) +[Freestyle step]({{site.baseurl}}/docs/pipelines/steps/) + + diff --git a/_docs/example-catalog/ci-examples/rust.md b/_docs/example-catalog/ci-examples/rust.md new file mode 100644 index 000000000..1efd443bb --- /dev/null +++ b/_docs/example-catalog/ci-examples/rust.md @@ -0,0 +1,84 @@ +--- +title: "Compile and test a Rust application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any Rust application very easily as both `rustc` and `cargo` are already offered in Dockerhub. + +## The example Rust project + +You can see the example project at [https://github.com/codefresh-contrib/rust-sample-app](https://github.com/codefresh-contrib/rust-sample-app){:target="\_blank"}. The repository contains a Rust starter project with a dummy unit test. + +* `cargo build` compiles the code. +* `cargo test` runs unit tests +* `cargo clean` removes artifacts and binaries. + + +## Create a CI pipeline for Rust applications + +Creating a CI/CD pipeline for Rust is very easy, because Codefresh can run any [Rust image](https://hub.docker.com/_/rust){:target="\_blank"} that you wish. Rust docker images already contain the `cargo` package manager. 
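+
+The same approach works for other `cargo` subcommands as well. For instance, you could extend the pipeline shown below with an extra lint step. This is only a sketch: it assumes network access during the build and that Clippy can be added on the fly with `rustup component add` (depending on how the image was built, it may or may not be preinstalled):
+
+ `codefresh.yml` (extract)
+{% highlight yaml %}
+{% raw %}
+  lint:
+    title: "Linting with Clippy"
+    type: "freestyle"
+    image: "rust:1.44-stretch"
+    working_directory: "${{clone}}"
+    environment:
+      # Reuse the same Cargo cache on the shared volume as the other steps
+      - CARGO_HOME=/codefresh/volume/cargo
+    commands:
+      # Clippy is distributed as a rustup component and must be available before the lint run
+      - "rustup component add clippy"
+      - "cargo clippy -- -D warnings"
+    stage: "test"
+{% endraw %}
+{% endhighlight %}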
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/rust/rust-pipeline.png" +url="/images/learn-by-example/rust/rust-pipeline.png" +alt="Compiling a Rust application in a pipeline" +caption="Compiling a Rust application in a pipeline" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/rust-sample-app/blob/master/codefresh.yml){:target="\_blank"} that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "test" +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/rust-sample-app" + revision: "master" + stage: "clone" + compile: + title: "Building Code" + type: "freestyle" + image: "rust:1.44-stretch" + working_directory: "${{clone}}" + environment: + - CARGO_HOME=/codefresh/volume/cargo + commands: + - "cargo build" + stage: "build" + test: + title: "Running tests" + type: "freestyle" + image: "rust:1.44-stretch" + working_directory: "${{clone}}" + environment: + - CARGO_HOME=/codefresh/volume/cargo + commands: + - "cargo test" + stage: "test" + +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, compiles the code and runs unit tests. In all cases we use the public Docker image of Rust that also contains `cargo`. + +We also pass the `CARGO_HOME` environment variable to place the Cargo cache on the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). See the [Caching documentation]({{site.baseurl}}/docs/pipelines/pipeline-caching/#traditional-build-caching) for more details. + + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/scala-hello-world.md b/_docs/example-catalog/ci-examples/scala-hello-world.md new file mode 100644 index 000000000..66681d4ad --- /dev/null +++ b/_docs/example-catalog/ci-examples/scala-hello-world.md @@ -0,0 +1,184 @@ +--- +title: "Scala: Hello World" +description: "Use Scala and Codefresh to clone, package, and build a Docker image" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/scala-hello-world/ +toc: true +--- + +So, you’ve decided to try Codefresh? Welcome on board! + +We’ll help you get up to speed with basic functionality such as: compiling, building Docker images and launching. + +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) + +## The Example Scala Application + +This project uses `Scala` to build an application which will eventually become a distributable Docker image. + +You can find the example application on [GitHub](https://github.com/codefresh-contrib/scala-hello-world-app){:target="\_blank"}. + +There are two pipeline examples provided in this tutorial: + +- Multi-stage Docker build +- Single stage Docker Build + +## Example Pipeline #1: Single stage Docker Build + +This example uses a single stage Docker build. 
The pipeline will have three stages: + +- A stage for cloning +- A stage for packaging +- A stage for building + +{% include image.html +lightbox="true" +file="/images/examples/scala/single-stage-pipeline.png" +url="/images/examples/scala/single-stage-pipeline.png" +alt="Codefresh UI pipeline view" +caption="Codefresh UI pipeline view" +max-width="100%" +%} + +Here is the Dockerfile used for this example: + +`Dockerfile-single-stage` +```shell +FROM openjdk:8-jre-alpine3.9 + +COPY . . + +CMD ["java", "-cp", "target/scala-2.12/*.jar:scala-library-2.12.2.jar", "HelloWorld"] +``` + +And here is the pipeline. You can copy and paste it in the inline YAML editor in the UI: + + `codefresh-single-stage.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - clone + - package + - build + +steps: + clone: + title: Cloning repository... + type: git-clone + stage: clone + arguments: + repo: codefresh-contrib/scala-hello-world-app + revision: master + package: + title: Packaging application... + type: freestyle + stage: package + working_directory: ./scala-hello-world-app + arguments: + image: hseeberger/scala-sbt:11.0.6_1.3.9_2.13.1 + commands: + - sbt -Dsbt.ivy.home=/codefresh/volume/ivy_cache clean compile package + - cp /codefresh/volume/ivy_cache/cache/org.scala-lang/scala-library/jars/scala-library-2.12.2.jar . + build_image: + title: Building Docker image... + type: build + working_directory: ${{clone}} + stage: build + arguments: + image_name: codefresh/scala-sample-app + tag: 1.0.0 + dockerfile: Dockerfile-single-stage +{% endraw %} +{% endhighlight %} + +The above pipeline does the following: + +1. A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step that clones the main repository +2. A [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that uses an SBT image that packages the application (note how `sbt.ivy.home` is set to an arbitrarily named directory that is part of the codefresh volume). This ensures we cache dependencies to [speed up builds]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/#caching-the-maven-dependencies), similar to Maven. +3. The last step, `build_image`, is a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) that builds a Docker image using the Dockerfile provided in the repository. + +## Example Pipeline #2: Multi-stage Docker Build + +This example uses a multi stage Docker build. 
The pipeline will have only two stages this time, as packaging of the app is handled in the Dockerfile itself: + +- A stage for cloning +- A stage for building + +{% include image.html +lightbox="true" +file="/images/examples/scala/multi-stage-pipeline.png" +url="/images/examples/scala/multi-stage-pipeline.png" +alt="Codefresh UI pipeline view" +caption="Codefresh UI pipeline view" +max-width="100%" +%} + +Here, you will find the multi-stage Dockerfile, copying over only the jars we need: + +`Dockerfile-multi-stage` + +```shell +# first stage + +FROM hseeberger/scala-sbt:11.0.6_1.3.9_2.13.1 AS build + +COPY ./ ./ + +RUN sbt compile clean package + +# second stage + +FROM openjdk:8-jre-alpine3.9 + +COPY --from=build /root/target/scala-2.12/*.jar /scala-hello-world-sample-app.jar +COPY --from=build /root/.ivy2/cache/org.scala-lang/scala-library/jars/scala-library-2.12.2.jar /scala-library-2.12.2.jar + +CMD ["java", "-cp", "scala-hello-world-sample-app.jar:scala-library-2.12.2.jar", "HelloWorld"] +``` +Here is the pipeline, you can copy and paste it into the inline YAML editor: + +`codefresh-multi-stage.yml` + +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - clone + - build + +steps: + clone: + title: Cloning repository... + type: git-clone + stage: clone + arguments: + repo: codefresh-contrib/scala-hello-world-app + revision: master + build_image: + title: Building Docker image... + type: build + working_directory: ${{clone}} + stage: build + arguments: + image_name: codefresh/scala-hello-world-app + tag: 1.0.0 + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +1. A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step that clones the main repository +2. A [build step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that builds our code into a Docker image using the Dockerfile present in the repository + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Freestyle Step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + diff --git a/_docs/example-catalog/ci-examples/scala.md b/_docs/example-catalog/ci-examples/scala.md new file mode 100644 index 000000000..7415259d2 --- /dev/null +++ b/_docs/example-catalog/ci-examples/scala.md @@ -0,0 +1,10 @@ +--- +title: "Scala" +description: "" +group: example-catalog +redirect_from: + - /docs/scala/ +toc: true +--- +This section contains Codefresh examples based on Scala. +- [Scala: Hello World]({{site.baseurl}}/docs/learn-by-example/scala/scala-hello-world/) diff --git a/_docs/example-catalog/ci-examples/sending-the-notification-to-jira.md b/_docs/example-catalog/ci-examples/sending-the-notification-to-jira.md new file mode 100644 index 000000000..2d0245099 --- /dev/null +++ b/_docs/example-catalog/ci-examples/sending-the-notification-to-jira.md @@ -0,0 +1,88 @@ +--- +title: "Send notification to Jira" +description: "" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +The plugin marketplace offers several freestyle steps for your Codefresh pipeline. + +One of those steps is the [Jira Issue Manager](https://codefresh.io/steps/step/jira-issue-manager){:target:"\_blank"}. + +## Prerequisites +* [Codefresh pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/) +* [Jira account](https://www.atlassian.com/software/jira){:target:"\_blank"} + +## Example +This documentation uses the following [example](https://github.com/codefresh-contrib/jira-demo-app){:target:"\_blank"}. 
You can either use the example provided to try out the Jira integration or follow along with your own application. + +1. You need an issue in your Jira account that you want to link to your Codefresh pipeline. If you do not have one yet, please create an issue. (Note that the project type and who is creating the issue etc. does not matter.) Alternatively, you can also create an issue first with the Jira step. However, this is not explained in this example. + +2. Next, add the following step to your Codefresh pipeline. In case you are using the example, the [codefresh.yml](https://github.com/codefresh-contrib/jira-demo-app/blob/master/codefresh.yml){:target:"\_blank"} file is already added. + +{% highlight yaml %} + JiraCommentCreate: + title: "Add Jira Comment" + type: "jira-issue-manager" + stage: "deploy" + arguments: + JIRA_BASE_URL: '${{JIRA_BASE_URL}}' + JIRA_USERNAME: '${{JIRA_USERNAME}}' + JIRA_API_KEY: '${{JIRA_API_KEY}}' + JIRA_ISSUE_SOURCE_FIELD: '${{JIRA_ISSUE_SOURCE_FIELD}}' + ACTION: "comment_create" + COMMENT_BODY: "Build number ${{CF_BUILD_URL}} finished in Codefresh" +{% endhighlight yaml %} + +Let's look in detail at this step. +- Everything up to the arguments is similar to other Codefresh steps. + +These arguments are required to use the step: +- `JIRA_BASE_URL`: This is the url of your organisation e.g. 'https://company-name.atlassian.net' +- `JIRA_USERNAME`: This is usually the e-mail that you are logged in with at Jira +- `JIRA_API_KEY`: Note that you will have to create this key. The official [Atlassian documentation](https://confluence.atlassian.com/cloud/api-tokens-938839638.html){:target:"\_blank"} details how it can be created. + +Then we added these arguments for our specific step: +- `JIRA_ISSUE_SOURCE_FIELD`: This is the tag that identifies your issue, for example, `MKTG-102` +- Within the comment, we use a [Codefresh native variable]({{site.baseurl}}/docs/docs/pipelines/variables/) `CF_BUILD_URL`, which references your pipeline build and allows you to search for your pipeline. + +All variables use the Codefresh-specific variable notation ${% raw %}`{{MY_VARIABLE_EXAMPLE}}`{% endraw %}`. + +Since it is a new stage in your Codefresh pipeline, you want to add it at the top to your stages, e.g.: + +{% highlight yaml %} + stages: + - "clone" + - "build" + - "JiraCommentCreate" +{% endhighlight yaml %} + +Note that you can [provide the variables]({{site.baseurl}}/docs/pipelines/shared-configuration/) needed for the Jira step directly in the shared configuration. The benefits are: +* You do not have to post sensitive information, such as the API key, directly in the codefresh.yml. +* If you use the same step across multiple pipelines, you don't have to copy-paste the same variables. 
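+
+Another optional tweak: if you want the comment to be added only for builds of a particular branch, you can guard the step with a `when` condition. The snippet below is only a sketch; it reuses the step shown earlier and assumes your main branch is called `master`, so adjust the branch name to your own workflow:
+
+{% highlight yaml %}
+  JiraCommentCreate:
+    title: "Add Jira Comment"
+    type: "jira-issue-manager"
+    stage: "deploy"
+    when:
+      branch:
+        only:
+          # Run this step only when the master branch is built
+          - master
+    arguments:
+      JIRA_BASE_URL: '${{JIRA_BASE_URL}}'
+      JIRA_USERNAME: '${{JIRA_USERNAME}}'
+      JIRA_API_KEY: '${{JIRA_API_KEY}}'
+      JIRA_ISSUE_SOURCE_FIELD: '${{JIRA_ISSUE_SOURCE_FIELD}}'
+      ACTION: "comment_create"
+      COMMENT_BODY: "Build number ${{CF_BUILD_URL}} finished in Codefresh"
+{% endhighlight yaml %}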
+ +Once you run the pipeline, you should be able to see the following output or similar: + +{% include image.html +lightbox="true" +file="/images/integrations/jira/codefreshpipeline.png" +url="/images/integrations/jira/codefreshpipeline.png" +alt="Pipeline with Jira integration" +max-width="80%" +%} + +And the comment, including the URL to the pipeline, should be added to your Jira issue: + +{% include image.html +lightbox="true" +file="/images/integrations/jira/jira-comment.png" +url="/images/integrations/jira/jira-comment.png" +alt="Comment in Jira" +max-width="80%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) +[Sending notifications to Slack]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-slack/) +[Create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) diff --git a/_docs/example-catalog/ci-examples/sending-the-notification-to-slack.md b/_docs/example-catalog/ci-examples/sending-the-notification-to-slack.md new file mode 100644 index 000000000..1af32946d --- /dev/null +++ b/_docs/example-catalog/ci-examples/sending-the-notification-to-slack.md @@ -0,0 +1,44 @@ +--- +title: "Send notification to Slack" +description: "Connect your Codefresh pipelines to Slack" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/sending-the-notification-to-slack/ +toc: true +--- + +There are many ways to integrate Slack with Codefresh: + +1. Use the [global slack integration]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) +1. Use individual pipeline plugins such [slack-message-sender](https://codefresh.io/steps/step/slack-message-sender){:target:"\_blank"} and [slack-notifier](https://codefresh.io/steps/step/slack-notifier){:target:"\_blank"} +1. Use simple POST requests with Curl, as explained in this article + +## Custom webhook to Slack + +Use a container image with a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) such as `byrnedo/alpine-curl` to send a notification to a Slack channel. + +{:start="1"} +1. Get the {% raw %}```${{SLACK_WEB_URL}}```{% endraw %} and put it in the Environment Variables or use [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + + > To integrate with Slack, see [https://api.slack.com/incoming-webhooks](https://api.slack.com/incoming-webhooks){:target="_blank"}. + +{:start="2"} +2. 
Add the following step to your `codefresh.yml`: + + `slack step` +{% highlight yaml %} +slack_notify: + image: byrnedo/alpine-curl # curlimages/curl, or any other curl image + commands: + - curl -X POST --data-urlencode 'payload={"text":"Test slack integration via yaml"}' {% raw %}${{SLACK_WEB_URL}}{% endraw %} +{% endhighlight %} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Global Slack Integration]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) +[Advanced Workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +[Hooks in pipelines]({{site.baseurl}}/docs/pipelines/hooks/) +[Shared Configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) + diff --git a/_docs/example-catalog/ci-examples/shared-volumes-between-builds.md b/_docs/example-catalog/ci-examples/shared-volumes-between-builds.md new file mode 100644 index 000000000..99db466d7 --- /dev/null +++ b/_docs/example-catalog/ci-examples/shared-volumes-between-builds.md @@ -0,0 +1,115 @@ +--- +title: "Share data between pipeline steps" +description: "How to cache folders between steps and builds" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/shared-volumes-between-builds/ +toc: true +--- + +Codefresh creates a [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) in each pipeline that is automatically shared with all freestyle steps. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume.png" +url="/images/pipeline/introduction/codefresh-volume.png" +alt="Codefresh volume" +caption="All steps share the same volume" +max-width="90%" +%} + +This volume exists at `/codefresh/volume` by default. Simply copy files there to have them available to all Codefresh steps (as well as subsequent builds of the same pipeline). + +>The [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) deletes any files **not** specified in `.gitignore`. To cache a folder that exists in your project directory (such as `node_modules`), you must also add it to `.gitignore` + +## Using the shared volume + +You can see the example project at [https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds](https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds){:target="\_blank"}. The repository contains a simple application, a Dockerfile, and an example pipeline that saves/reads a dummy file to the Codefresh volume. 
+ + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "shared-volume" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefreshdemo/cf-example-shared-volumes-between-builds" + revision: "master" + stage: "clone" + + build_image: + title: "Building image" + type: "build" + image_name: "sample-app" + working_directory: "${{clone}}" + tag: "demo" + dockerfile: "Dockerfile" + stage: "build" + + copy_to_shared_volume: + title: "Copy file to shared volume" + type: "freestyle" + image: alpine:3.9 + working_directory: "${{clone}}" + commands: + - ls -l /codefresh/volume/ + - cp ./artifact/artifact.example /codefresh/volume/artifact.example + stage: "shared-volume" + + list_shared_volume: + title: "List shared volume files" + type: "freestyle" + image: alpine:3.9 + working_directory: "${{clone}}" + commands: + - pwd + - ls -l /codefresh/volume + stage: "shared-volume" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Copies the file `artifact.example` to the volume through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Reads the contents of the volume through a different freestyle step. + +If you run the pipeline, you will see the file contents in the fourth step: + +{% include +image.html +lightbox="true" +file="/images/examples/shared-workspace/volume-list.png" +url="/images/examples/shared-workspace/volume-list.png" +alt="Listing volume contents" +caption="Listing volume contents" +max-width="80%" +%} + + +If you run the pipeline a second time, you will see the dummy file in all steps, as the volume is automatically cached for subsequent builds as well. + + +## Caching build dependencies and Docker layers + +Read more about caching build dependencies in [caching in pipelines]({{site.baseurl}}/docs/pipelines/pipeline-caching/), and in this [blog post](https://codefresh.io/blog/caching-build-dependencies-codefresh-volumes/){:target:"\_blank"}. + + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle) diff --git a/_docs/example-catalog/ci-examples/shared-volumes-of-service-from-composition-step-for-other-yml-steps.md b/_docs/example-catalog/ci-examples/shared-volumes-of-service-from-composition-step-for-other-yml-steps.md new file mode 100644 index 000000000..d99b74cf3 --- /dev/null +++ b/_docs/example-catalog/ci-examples/shared-volumes-of-service-from-composition-step-for-other-yml-steps.md @@ -0,0 +1,45 @@ +--- +title: "Share service volumes in composition steps" +description: "How to share data in compositions" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/shared-volumes-of-service-from-composition-step-for-other-yml-steps/ +toc: true +--- +Using this repository, we'll help you get up to speed with basic functionality such as building Docker images and using the shared volumes. + +This project uses Node Js to build an application which will eventually become a distributable Docker image. 
+To share volumes of service in composition step for other yml steps you can use the variable {% raw %}```${{CF_VOLUME_NAME}}```{% endraw %}. It will refer to the volume that was generated for the specific flow. Can be used in conjunction with a composition to provide access to your cloned repository. + +>Read more about caching build dependencies our [blog](https://codefresh.io/blog/caching-build-dependencies-codefresh-volumes/){:target="_blank"}. + +## Looking around +In the root of this repository you'll find a file named `codefresh.yml`, this is our build descriptor that describes the different steps that comprise our process. Let's quickly review the contents of this file: + + `codefresh.yml` +{% highlight yaml %} +step_file_generation: + type: composition + composition: + version: '2' + services: + service1: + volumes: + - {% raw %}${{CF_VOLUME_NAME}}{% endraw %}:/codefresh/volume + image: {% raw %}${{build_step}}{% endraw %} + command: bash -c "echo hello > /codefresh/volume/myfile.txt" + composition_candidates: + test: + image: {% raw %}${{build_step}}{% endraw %} + command: echo hello +{% endhighlight %} + +>Example + Just head over to the example [**repository**](https://github.com/codefreshdemo/cf-example-shared-volumes-composition-step){:target="_blank"} in GitHub, and follow the instructions there. + +The way the volume is shared between builds is that upon build completion we create an image of the volume state to be used in the next builds. If you run 2 builds in parallel from the same pipeline and at the same time, each will use the same last volume image, but it’ll run separately on both. The volume image you’ll get upon completion is the state of the build that finished last. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) diff --git a/_docs/example-catalog/ci-examples/spring-boot-2.md b/_docs/example-catalog/ci-examples/spring-boot-2.md new file mode 100644 index 000000000..37230e51f --- /dev/null +++ b/_docs/example-catalog/ci-examples/spring-boot-2.md @@ -0,0 +1,252 @@ +--- +title: "Spring Boot 2/Maven" +description: "Create Docker images for Spring/Maven" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/spring-boot-2/ + - /docs/java/spring-boot-2/ +toc: true +--- + +Spring Boot is quickly becoming a very popular choice for building Java back-end applications. Compared to traditional application servers,it is a bit different since it includes a servlet container in the final JAR file allowing +for self-contained Java Archives (JAR files). + +Codefresh can easily handle Spring Boot applications that are dockerized either in the traditional way or using multi-stage builds. + +## The example Java project + +You can see the example project at [https://github.com/codefresh-contrib/spring-boot-2-sample-app](https://github.com/codefresh-contrib/spring-boot-2-sample-app){:target="\_blank"}. The repository contains a Spring Boot 2 project built with Maven with the following goals: + +* `mvn package` creates a jar file that can be run on its own (exposes port 8080). It also runs unit tests. +* `mvn verify` runs integration tests as well. The application is launched locally as part of the Maven lifecycle. + +Once launched the application presents a simple message at localhost:8080 and also at the various `/actuator/health` endpoints. You can use the standard `spring-boot:run` command to run it locally (without Docker). 
+ +## Spring Boot 2 and Docker (package only) + +A Dockerfile is also provided at the same repository. It uses the base JRE image and just copies the JAR file inside the container. + + `Dockerfile.only-package` +{% highlight docker %} +{% raw %} +FROM java:8-jre-alpine + +EXPOSE 8080 + +RUN mkdir /app +COPY target/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] + +HEALTHCHECK --interval=1m --timeout=3s CMD wget -q -T 3 -s http://localhost:8080/actuator/health/ || exit 1 + +{% endraw %} +{% endhighlight %} + +This means that _before_ building the Docker image, the compilation step (`mvn package`) is expected to be finished already. Therefore, in the `codefresh.yml` file we need at least two steps. The first one should prepare the JAR file and the second +one should create the Docker image. + +### Create a CI pipeline for Spring + +The repository also contains a premade [Codefresh YAML file]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) that you can use as a starting point in your own Spring Boot 2 projects. + +Here are the full contents of the file. + + `codefresh-package-only.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build + - 'integration test' +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + run_unit_tests: + title: Compile/Unit test + stage: test + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'non-multi-stage' + dockerfile: Dockerfile.only-package + run_integration_tests: + title: Integration test + stage: 'integration test' + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://my-spring-app + services: + composition: + my-spring-app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://my-spring-app:8080/" +{% endraw %} +{% endhighlight %} + +The pipeline starts by checking out the code using a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). The next step is a [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) one and packages the jar file. Next we have a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) that creates the docker image. Finally we have another freestyle +step that uses [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) to run integration tests. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/spring-boot-steps.png" +url="/images/learn-by-example/java/spring-boot-steps.png" +alt="Spring boot pipeline" +caption="Spring boot pipeline" +max-width="80%" +%} + +After checking out the code we use the standard [Maven Docker image](https://hub.docker.com/_/maven/){:target="\_blank"} to compile the Spring Boot source code and create a JAR file. We also pass a parameter that changes the Maven cache location folder. The reason for this parameter is that the default Maven location is `/root/.m2` which is defined as a volume (and thus discarded after each build). 
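+
+If you prefer not to repeat the `-Dmaven.repo.local` flag on every Maven invocation, an equivalent variant (sketched below) is to export it once per step through `MAVEN_OPTS`, since Maven picks up system properties defined there:
+
+ `codefresh-package-only.yml` (extract)
+{% highlight yaml %}
+{% raw %}
+  run_unit_tests:
+    title: Compile/Unit test
+    stage: test
+    image: 'maven:3.5.2-jdk-8-alpine'
+    environment:
+      # Same effect as passing -Dmaven.repo.local on the command line
+      - MAVEN_OPTS=-Dmaven.repo.local=/codefresh/volume/m2_repository
+    commands:
+      - mvn package
+{% endraw %}
+{% endhighlight %}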
+ +### Caching the Maven dependencies + +Codefresh is smart enough that [caches automatically]({{site.baseurl}}/docs/pipelines/pipeline-caching/) for us the workspace of a build (`/codefresh/volume`). This works great for build tools that keep their cache in the project folder, but not for Maven/Gradle which keep their cache externally. By changing the location of the Maven repo on the project folder (the `m2_repository` name is arbitrary) we make sure that Codefresh will cache automatically the Maven libraries resulting in much faster builds. + +The next step is a Docker build. We name our image **spring-boot-2-sample-app** and tag it with a string `non-multi-stage` but of course you can use any other tag name that you wish. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/spring-boot-docker-image.png" +url="/images/learn-by-example/java/spring-boot-docker-image.png" +alt="Spring Boot Docker image" +caption="Spring Boot Docker image" +max-width="80%" +%} + +Once the pipeline is finished you will see the Spring Boot 2 Docker image your [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images). + +The last step is similar to the unit tests, but this time we run integration tests. We define again a custom cache folder so when you run the build you will see that Maven will automatically pick the cache from the previous step. All Codefresh steps in a pipeline [run on the same workspace]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps), so the build results from one step are visible to the next. + +>Notice that because the [Maven lifecycle](https://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html){:target="\_blank"} also executes the previous steps in a build, the `mvn verify` command essentially will run `mvn package` as well. In theory we could just have the _Integration_ step in this pipeline on its own. That step would build the code, run unit and integration tests all in one stage. For demonstration purposes however, we include two steps so that you can see the correct usage of Maven cache. + + +## Spring Boot 2 and Docker (multi-stage builds) + +Docker added [multi-stage builds](https://blog.docker.com/2017/07/multi-stage-builds/){:target="\_blank"} at version 17.05. With multi-stage builds a Docker build can use one base image for compilation/packaging/unit tests and a different one that will hold the runtime of the application. This makes the final image more secure and smaller in size (as it does not contain any development/debugging tools). + +In the case of Java, multistage builds allow for the compilation itself to happen during the build process, even though the final Docker image will not contain a full JDK. + + +Here is the multi-stage build definition: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM maven:3.5.2-jdk-8-alpine AS MAVEN_TOOL_CHAIN +COPY pom.xml /tmp/ +RUN mvn -B dependency:go-offline -f /tmp/pom.xml -s /usr/share/maven/ref/settings-docker.xml +COPY src /tmp/src/ +WORKDIR /tmp/ +RUN mvn -B -s /usr/share/maven/ref/settings-docker.xml package + +FROM java:8-jre-alpine + +EXPOSE 8080 + +RUN mkdir /app +COPY --from=MAVEN_TOOL_CHAIN /tmp/target/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] + +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. 
Starts from the standard Maven Docker image +1. Copies only the `pom.xml` file inside the container +1. Runs a mvn command to download all dependencies found in the `pom.xml` +1. Copies the rest of the source code in the container +1. Compiles the code and runs unit tests (with `mvn package`) +1. Discards the Maven image with all the compiled classes/unit test results etc +1. Starts again from the JRE image and copies **only** the JAR file created before + +The order of the steps is tuned so that it takes advantage of the layer caching built-in to Docker. +If you change something in the source code Docker already has a layer with Maven dependencies so they +will not be re-downloaded again. Only if you change the `pom.xml` file itself, Docker will start again from the lowest layer. + +Again, we define a custom location for the Maven cache (using the `settings-docker.xml` file). This way the Maven dependencies are placed inside the container and they will be cached automatically with the respective layer (Read more about this technique [at the official documentation](https://github.com/carlossg/docker-maven#packaging-a-local-repository-with-the-image){:target="\_blank"}. + +### Create a CI pipeline for Spring (multi-stage Docker builds) + +Because in multi-stage builds Docker itself handles most of the build process, moving the project to Codefresh is straightforward. We just need [a single step](https://github.com/codefresh-contrib/spring-boot-2-sample-app/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. The integration test step is the same as before. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build + - 'integration test' +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'multi-stage' + dockerfile: Dockerfile + run_integration_tests: + title: Integration test + stage: 'integration test' + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://my-spring-app + services: + composition: + my-spring-app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://my-spring-app:8080/" +{% endraw %} +{% endhighlight %} + +This will compile/test/package the Spring Boot application and create a Docker image. Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. 
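+
+If you also want to publish the resulting image to an external registry, a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) can be appended after the build step. The fragment below is only a sketch; the `dockerhub` registry name is an assumption and should match one of your own registry integrations:
+
+ `codefresh.yml` (fragment)
+{% highlight yaml %}
+{% raw %}
+  push_app_image:
+    title: Pushing image to registry
+    type: push
+    stage: build
+    candidate: '${{build_app_image}}'
+    tag: 'multi-stage'
+    registry: dockerhub
+{% endraw %}
+{% endhighlight %}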
+ + +## Related articles +[Gradle example]({{site.baseurl}}/docs/example-catalog/ci-examples/gradle/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/ci-examples/uploading-or-downloading-from-gs.md b/_docs/example-catalog/ci-examples/uploading-or-downloading-from-gs.md new file mode 100644 index 000000000..1bfcf82df --- /dev/null +++ b/_docs/example-catalog/ci-examples/uploading-or-downloading-from-gs.md @@ -0,0 +1,152 @@ +--- +title: "Upload/Download files to/from Google Storage" +description: "Upload and download a JAR from Google Storage from within a pipeline" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +- A [Google Storage Bucket](https://cloud.google.com/storage/docs/creating-buckets){:target="\_blank"} with public read access +- A private key [downloaded](https://cloud.google.com/storage/docs/authentication#gsutilauth){:target="\_blank"} for the existing service account associated with your bucket (for this example, we base64 encoded the key for ease of use in a pipeline variable using `base64 key_file.json > key_file.b64`) + +## Example Project + +The example project is at [GitHub](https://github.com/codefresh-contrib/gcloud-storage-sample-app.git){:target="\_blank"}. The application is a simple Scala Hello World application contained in a jar, with a dependency on a scala-library jar which we will download from the bucket and package into a Docker image. + +Our project contains two pipelines, one to upload the dependency JAR _to_ our bucket, and the other to download the JAR _from_ the bucket. + +## Create the first pipeline + +The first pipeline contains one stage/step, to upload the JAR to the Google Storage Bucket. + +{% include image.html +lightbox="true" +file="/images/examples/gs/gs-upload-pipeline.png" +url="/images/examples/gs/gs-upload-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +You need to define a pipeline variable, KEY_FILE, in the pipeline settings: + +{% include image.html +lightbox="true" +file="/images/examples/gs/gs-pipeline-vars.png" +url="/images/examples/gs/gs-pipeline-vars.png" +alt="Codefresh UI Pipeline Variables" +caption="Codefresh UI Pipeline Variables" +max-width="70%" +%} + +Here is the first pipeline: + +`codefresh-upload.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - "upload" + +steps: + upload: + title: "Uploading library jar to GS..." + type: "freestyle" + stage: "upload" + arguments: + image: "google/cloud-sdk:slim" + commands: + - echo $KEY_FILE | base64 --decode > key_file.json + - gcloud auth activate-service-account --key-file=key_file.json + - curl https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.12.2/scala-library-2.12.2.jar | gsutil cp - gs://anna-demo-bucket/scala-library-2.12.2.jar +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Uploads a JAR from Maven into our Google Storage bucket through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
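+
+If you want the pipeline to also verify that the object landed in the bucket, you could append a second freestyle step that lists it with `gsutil`. This is only a sketch; it reuses the `google/cloud-sdk:slim` image and assumes the decoded `key_file.json` from the previous step is still present in the shared working directory:
+
+{% highlight yaml %}
+{% raw %}
+  verify_upload:
+    title: "Verifying uploaded object..."
+    type: "freestyle"
+    stage: "upload"
+    arguments:
+      image: "google/cloud-sdk:slim"
+      commands:
+        - gcloud auth activate-service-account --key-file=key_file.json
+        - gsutil ls -l gs://anna-demo-bucket/scala-library-2.12.2.jar
+{% endraw %}
+{% endhighlight %}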
+ +## Create the second pipeline + +Our second pipeline has four stages: + +- A stage for cloning the repository +- A stage for downloading the jar from the bucket +- A stage for building the image +- A stage for pushing the image to the repository + +{% include image.html +lightbox="true" +file="/images/examples/gs/gs-download-pipeline.png" +url="/images/examples/gs/gs-download-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +Here is the YAML for the second pipeline: + +`codefresh-download.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - "clone" + - "download" + - "build" + - "push" + +steps: + clone: + title: "Cloning main repository..." + type: "git-clone" + stage: "clone" + arguments: + repo: "codefresh-contrib/gcloud-storage-sample-app" + git: "github" + revision: "master" + download: + title: "Downloading dependency lib from GS..." + type: "freestyle" + stage: "download" + working_directory: ${{clone}} + arguments: + image: "google/cloud-sdk:slim" + commands: + - gsutil cp gs://anna-demo-bucket/scala-library-2.12.2.jar . + build: + title: "Building docker image..." + type: "build" + stage: "build" + working_directory: ${{clone}} + arguments: + image_name: "annabaker/gcloud-storage-sample-app" + tag: "master" + push_to_my_registry: + stage: "push" + type: "push" + title: "Pushing to external registry..." + arguments: + candidate: ${{build}} + tag: '1.0.0' + registry: "dockerhub" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Downloads the dependency JAR from our publicly-accessible Google Storage bucket through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Builds a docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +4. Pushes the Docker image to the DockerHub registry you have integrated with Codefresh through a [push step](https://codefresh.io/docs/docs/pipelines/steps/push/). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) + + diff --git a/_docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline.md b/_docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline.md new file mode 100644 index 000000000..d02cee77c --- /dev/null +++ b/_docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline.md @@ -0,0 +1,116 @@ +--- +title: "Vault secrets in pipelines" +description: "Access and refer to Vault secrets in pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh offers a Vault plugin you may use from the [Step Marketplace](https://codefresh.io/steps/step/vault){:target="\_blank"}. The plugin imports key-value pairs from the Vault server, and exports them into the pipeline. 
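+
+In its simplest form the step only needs the Vault server address, the path to the secret, and an authentication token. The fragment below is a sketch with placeholder values; the full pipeline further down this page shows the step in context:
+
+```yaml
+  vault:
+    title: Importing vault values...
+    type: vault
+    arguments:
+      VAULT_ADDR: 'http://vault-server:8200'   # placeholder server address
+      VAULT_PATH: 'path/to/secret'             # placeholder secret path
+      VAULT_AUTH_TOKEN: 'my-vault-token'       # placeholder token
+```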
+ +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +- An existing Vault server [already setup](https://learn.hashicorp.com/vault/getting-started/install){:target="\_blank"} +- A secret stored in said Vault server with a key of `password` +- A Vault [authorization token](https://learn.hashicorp.com/vault/getting-started/authentication#tokens){:target="\_blank"} + +## Example Java application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/vault-sample-app){:target="\_blank"}. + +The example application retrieves the system variable `password` from the pipeline, and uses it to authenticate to a Redis database, but you are free to use any type of database of your choosing. + +```java + String password = System.getenv("password"); + String host = System.getProperty("server.host"); + + RedisClient redisClient = new RedisClient( + RedisURI.create("redis://" + password + "@" + host + ":6379")); + RedisConnection connection = redisClient.connect(); +``` + +Also in the example application is a simple unit test that ensures we are able to read and write data to the database. + +You cannot run the application locally, as it needs to run in the pipeline in order to use our environment variables to connect. + +## Create the pipeline + +The following pipeline contains three steps: a vault step, a [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step, and a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + +{% include image.html +lightbox="true" +file="/images/examples/secrets/vault-pipeline.png" +url="/images/examples/secrets/vault-pipeline.png" +alt="Vault pipeline" +caption="Vault Pipeline" +max-width="100%" +%} + +You should be able to copy and paste this YAML into the in-line editor in the Codefresh UI. It will automatically clone the project for you. + +Note that you need to change the `VAULT_ADDR`, `VAULT_AUTH`, and `VAULT_AUTH_TOKEN` arguments within the first step to your respective values. + +`codefresh.yml` +```yaml +version: "1.0" +stages: + - "vault" + - "clone" + - "package" +steps: + vault: + title: Importing vault values... + stage: "vault" + type: vault + arguments: + VAULT_ADDR: 'http://:' + VAULT_PATH: 'path/to/secret' + VAULT_AUTH_TOKEN: '' + clone: + title: Cloning main repository... + type: git-clone + arguments: + repo: 'codefresh-contrib/vault-sample-app' + git: github + stage: clone + package_jar: + title: Packaging jar and running unit tests... + stage: package + working_directory: ${{clone}} + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dserver.host=my-redis-db-host clean package + services: + composition: + my-redis-db-host: + image: 'redis:4-alpine' + command: 'redis-server --requirepass $password' + ports: + - 6379 +``` + +The pipeline does the following: + +1. Imports the key-value pairs from the Vault server and exports them into the pipeline under `/meta/env_vars_to_export`. +2. Clones the main repository (note the special use of naming the step `main_clone`). This ensures that all subsequent commands are run [inside the project that was checked out]({{site.baseurl}}/docs/pipelines/steps/git-clone/#basic-clone-step-project-based-pipeline). +3. 
The `package_jar`, does a few special things to take note of: + - Spins up a [Service Container]({{site.baseurl}}/docs/pipelines/service-containers/) running Redis on port 6379 , and sets the password to the database using our exported environment variable + - Sets `maven.repo.local` to cache Maven dependencies into the local codefresh volume to [speed up builds]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/#caching-the-maven-dependencies) + - Runs unit tests and packages the jar. Note how you can directly refer to the service container's name (`my-redis-db-host`) when we set `server.host` + +You will see that the variable was correctly exported to the pipeline by running a simple `echo` command: + {% include image.html + lightbox="true" + file="/images/examples/secrets/vault-pipeline2.png" + url="/images/examples/secrets/vault-pipeline2.png" + alt="Vault pipeline variable" + caption="Vault pipeline variable" + max-width="100%" + %} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) + diff --git a/_docs/example-catalog/ci-examples/voting-app.md b/_docs/example-catalog/ci-examples/voting-app.md new file mode 100644 index 000000000..08cb4a5cf --- /dev/null +++ b/_docs/example-catalog/ci-examples/voting-app.md @@ -0,0 +1,93 @@ +--- +title: "Voting app" +description: "" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/voting-app-1/ + - /docs/python/voting-app/ +toc: true +--- +This voting application is a demo with which you can build an advanced composition that uses `Python, Redis, Postgres, Node.js, and .Net`. + +## Looking around +In the root of this repository you'll find a file named codefresh.yml, this is our build descriptor and it describes the different steps that comprise our process. 
Let's quickly review the contents of this file: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + unit-tests: + image: codefresh/buildpacks:nodejs-5 + working-directory : ${{initial-clone}} + commands: + - echo Installing npm modules silent + - npm install + - gulp test + - echo $(date) + + build-step: + #title: Build My Image #Display name for the step + type: build + image-name: containers101/cf-example-result + tag: ${{CF_BRANCH}} + build_arguments: + - OPTION_A=${{OPTION_A}} + - OPTION_B=${{OPTION_B}} + + push-to-registry: + type: push + #candidate: the image from the build step + candidate: ${{build-step}} + tag: ${{CF_BRANCH}} + + integration-tests-step: + type: composition + #location of the compostion on the filesystem of the cloned image + composition: './cf-compositions/voting-app-full.yml' + #run integration only when pushing to master + when: + branch: + only: + - master #can also be regex + composition-candidates: + #this will be the image that we will test + integ-test: + image: containers101/cf-example-tests:master + command: ./tests.sh + composition-variables: + - VOTING_OPTION_A=${{OPTION_A}} + - VOTING_OPTION_B=${{OPTION_B}} + + launch-composition: + type: launch-composition + environment-name: 'Test composition after build' + composition: './cf-compositions/voting-app-full.yml' + composition-variables: + - VOTING_OPTION_A=${{OPTION_A}} + - VOTING_OPTION_B=${{OPTION_B}} + + deploy to ecs: + image: codefresh/cf-deploy-ecs + commands: + - cfecs-update --image-name containers101/cf-example-result --image-tag ${{CF_BRANCH}} eu-west-1 vote-app result + environment: + - AWS_ACCESS_KEY_ID=${{AWS_ACCESS_KEY_ID}} + - AWS_SECRET_ACCESS_KEY=${{AWS_SECRET_ACCESS_KEY}} + when: + condition: + all: + pushCommit: 'includes(lower("${{CF_COMMIT_MESSAGE}}"), "[deploy]") == true' +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/containers101/cf-example-result){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) diff --git a/_docs/example-catalog/examples.md b/_docs/example-catalog/examples.md new file mode 100644 index 000000000..9dee45256 --- /dev/null +++ b/_docs/example-catalog/examples.md @@ -0,0 +1,127 @@ +--- +title: "CI/CD pipeline examples" +description: "A collection of examples for Codefresh pipelines" +group: example-catalog +redirect_from: + - /docs/examples-v01/ + - examples.html + - /docs/catalog-examples/ + - /docs/examples/ + - /docs/pipelines-examples/ + - /docs/pipelines/pipelines-examples/ +toc: true +--- +Codefresh enables you to define the steps of your pipeline in a [YAML file]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/). By default, the file is named `codefresh.yml`, and is located in the root directory of the repository. + +## CI examples + +### Programming-language specific examples + +Codefresh is agnostic as far as programming languages are concerned. All major programming languages are supported: + +- [Go Web App]({{site.baseurl}}/docs/example-catalog/ci-examples/golang-hello-world/) or [Go CLI]({{site.baseurl}}/docs/example-catalog/golang/goreleaser) +- [Spring Java app with Maven]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/) or [Gradle]({{site.baseurl}}/docs/example-catalog/ci-examples/gradle/). 
Also how to [upload JAR to Nexus/Artifactory]({{site.baseurl}}/docs/example-catalog/ci-examples/publish-jar/) +- Node [Express.js App]({{site.baseurl}}/docs/example-catalog/ci-examples/lets-chat/) or [React.js App]({{site.baseurl}}/docs/example-catalog/ci-examples/react/) +- [Php App]({{site.baseurl}}/docs/example-catalog/ci-examples/php) +- [Python Django App]({{site.baseurl}}/docs/example-catalog/ci-examples/django/) +- [Ruby On Rails App]({{site.baseurl}}/docs/example-catalog/ci-examples/ruby) +- [C]({{site.baseurl}}/docs/example-catalog/ci-examples/c-make/) or [C++]({{site.baseurl}}/docs/example-catalog/ci-examples/cpp-cmake) +- [Rust]({{site.baseurl}}/docs/example-catalog/ci-examples/rust/) +- [C# .NET core]({{site.baseurl}}/docs/example-catalog/ci-examples/dotnet/) +- [Scala App]({{site.baseurl}}/docs/example-catalog/ci-examples/scala-hello-world/) +- [Android (Mobile)]({{site.baseurl}}/docs/example-catalog/ci-examples/android/) + +### Source code checkout examples + +You can check out code from one or more repositories in any pipeline phase. Codefresh includes [built-in GIT integration]({{site.baseurl}}/docs/integrations/git-providers/) with all the popular GIT providers and can be used with [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) steps. + +- [Cloning Git repositories using the built-in integration]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) +- [Cloning Git repositories using manual Git commands]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout-custom/) +- [Checking out from Subversion, Perforce, Mercurial, etc ]({{site.baseurl}}/docs/example-catalog/ci-examples/non-git-checkout/) + +### Build/push examples + +Codefresh has native support for [building]({{site.baseurl}}/docs/pipelines/steps/build/) and [pushing]({{site.baseurl}}/docs/pipelines/steps/push/) Docker containers. +You can also compile traditional applications that are not Dockerized yet. 
+ +- [Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/) +- [Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +- [Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +- [Build and Push an Image]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image) +- [Build an Image with build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments) +- [Share data between steps]({{site.baseurl}}/docs/example-catalog/ci-examples/shared-volumes-between-builds) +- [Upload or download from a Google Storage Bucket]({{site.baseurl}}/docs/example-catalog/ci-examples/uploading-or-downloading-from-gs/) +- [Get Short SHA ID and use it in a CI process]({{site.baseurl}}/docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process) +- [Call a CD pipeline from a CI pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/call-child-pipelines) +- [Trigger a Kubernetes Deployment from a Dockerhub Push Event]({{site.baseurl}}/docs/example-catalog/ci-examples/trigger-a-k8s-deployment-from-docker-registry/) + + +### Unit and integration test examples + +Codefresh has support for both [unit]({{site.baseurl}}/docs/testing/unit-tests/) and [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) as well as [test reporting]({{site.baseurl}}/docs/testing/test-reports/). + +- [Run unit tests]({{site.baseurl}}/docs/example-catalog/ci-examples/run-unit-tests) +- [Run integration tests]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +- [Run integration tests with MongoDB]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +- [Run integration tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +- [Run integration tests with PostgreSQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +- [Run integration tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +- [Populate a database with existing data]({{site.baseurl}}/docs/example-catalog/populate-a-database-with-existing-data) + +- [Shared volumes of service from composition step for other yml steps]({{site.baseurl}}/docs/example-catalog/shared-volumes-of-service-from-composition-step-for-other-yml-steps) +- [Launch Composition]({{site.baseurl}}/docs/example-catalog/ci-examples/launch-composition) +- [Launch Composition and define Service Environment variables using a file]({{site.baseurl}}/docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file) +- [Run multiple kinds of unit tests using fan-in-fan-out parallel pipeline]({{site.baseurl}}/docs/example-catalog/fan-in-fan-out) + +### Code coverage examples + +- [Run coverage reports with Codecov]({{site.baseurl}}/docs/example-catalog/ci-examples/codecov-testing) +- [Run coverage reports with Coveralls]({{site.baseurl}}/docs/example-catalog/ci-examples/coveralls-testing) +- [Run coverage reports with Codacy]({{site.baseurl}}/docs/example-catalog/ci-examples/codacy-testing) + +### Secrets examples + +Codefresh can automatically export secret key-value pairs using the Vault plugin from the [Step 
Marketplace](https://codefresh.io/steps/step/vault). + +- [Vault secrets in the Pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline) +- [Decryption with Mozilla SOPS]({{site.baseurl}}/docs/example-catalog/ci-examples/ci-examples/decryption-with-mozilla-sops) +- [GitOps with Bitnami sealed secrets]({{site.baseurl}}/docs/example-catalog/ci-examples/gitops-secrets) + +### Notification examples + +- [Send notification to Slack]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-slack) +- [Send notification to Jira]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-jira) + + +## CD examples + +### Preview environment examples + +Codefresh can automatically launch environments (powered by Docker swarm) to [preview a Pull Reqest or feature]({{site.baseurl}}/docs/getting-started/on-demand-environments/). The definition of the environment can come from an [existing composition]({{site.baseurl}}/docs/testing/create-composition/), a docker-compose file or an inline YAML. Preview environments can be launched manually or [automatically from pipelines]({{site.baseurl}}/docs/pipelines/steps/launch-composition/). + +- [MongoDB preload data]({{site.baseurl}}/docs/example-catalog/cd-examples/import-data-to-mongodb/) +- [NodeJS + Angular2 + MongoDB]({{site.baseurl}}/docs/example-catalog/cd-examples/nodejs-angular2-mongodb/) +- [NGINX Basic Auth]({{site.baseurl}}/docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth/) +- [Spring Boot + Kafka + Zookeeper]({{site.baseurl}}/docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper/) +- [Web terminal]({{site.baseurl}}/docs/example-catalog/cd-examples/web-terminal/) + +### Deployment examples + +Codefresh can deploy to any platform such as VMs, FTP/SSH/S3 sites, app servers, but of course it has great support for [Kubernetes clusters]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) and [Helm releases]({{site.baseurl}}/docs/new-helm/helm-releases-management/): + +- [Deploy to a VM with packer]({{site.baseurl}}/docs/example-catalog/cd-examples/packer-gcloud/) +- [Deploy to a VM with FTP]({{site.baseurl}}/docs/example-catalog/cd-examples/transferring-php-ftp) +- [Deploy to Tomcat using SCP]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp) +- [Deploy Demochat to a Kubernetes cluster]({{site.baseurl}}/docs/cd-examples/deploy-to-kubernetes/codefresh-kubernetes-integration-demochat-example/) +- [Use kubectl as part of freestyle step]({{site.baseurl}}/docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step) +- [Deploy with Kustomize]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-with-kustomize) +- [Deploy with Helm]({{site.baseurl}}/docs/example-catalog/cd-examples/helm) +- [Deploy with Terraform]({{site.baseurl}}/docs/example-catalog/cd-examples/terraform) +- [Deploy with Pulumi]({{site.baseurl}}/docs/example-catalog/cd-examples/pulumi) +- [Deploy to Nomad]({{site.baseurl}}/docs/example-catalog/cd-examples/nomad) +- [Deploy to Heroku]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-to-heroku/) +- [Deploy to Docker swarm]({{site.baseurl}}/docs/example-catalog/cd-examples/docker-swarm/) +- [Deploy to Elastic Beanstalk]({{site.baseurl}}/docs/example-catalog/cd-examples/elastic-beanstalk/) +- [Deploy to Amazon ECS/Fargate]({{site.baseurl}}/docs/example-catalog/cd-examples/amazon-ecs/) + + diff --git a/_docs/example-catalog/gitops-example.md 
b/_docs/example-catalog/gitops-example.md new file mode 100644 index 000000000..ba1c727d1 --- /dev/null +++ b/_docs/example-catalog/gitops-example.md @@ -0,0 +1,9 @@ +--- +title: "GitOps examples" +description: "A collection of examples for GitOps deployments" +group: example-catalog +sub_group: gitops-examples +toc: true +--- + +TBD \ No newline at end of file diff --git a/_docs/getting-started/architecture.md b/_docs/getting-started/architecture.md deleted file mode 100644 index 584fd5070..000000000 --- a/_docs/getting-started/architecture.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: "Architecture" -description: "" -group: getting-started -toc: true ---- - -Codefresh GitOps is built around an enterprise version of the Argo Ecosystem, fully compliant with the GitOps paradigm, with industry-standard security. -To cater to differing requirements and degrees of enterprise security, Codefresh supports hosted and hybrid installation environments for Codefresh runtimes. - -The sections that follow illustrate the architecture of the different installation environments, starting with a high-level overview of the Codefresh Platform. - -### Codefresh architecture - -The diagram shows a high-level view of the Codefresh Platform and its core components, the Codefresh Control Plane, the Codefresh Runtime, and the Codefresh Clients. - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-codefresh-simple.png" - url="/images/getting-started/architecture/arch-codefresh-simple.png" - alt="Codefresh Platform architecture" - caption="Codefresh Platform architecture" - max-width="100%" -%} - -{::nomarkdown} -
      -{:/} - -#### Codefresh Control Plane -The Codefresh Control Plane is the SaaS component in the platform. External to the enterprise firewall, it does not have direct communication with the Codefresh Runtime, Codefresh Clients, or the customer's organizational systems. The Codefresh Runtime and the Codefresh Clients communicate with the Codefresh Control Plane to retrieve the required information. - - -{::nomarkdown} -
      -{:/} - -#### Codefresh Runtime -The Codefresh Runtime is installed on a Kubernetes cluster, and houses the enterprise distribution of the Codefresh Application Proxy and the Argo Project. -Depending on the type of installation environment, the Codefresh Runtime is installed either in the Codefresh platform (hosted), or in the customer environment (hybrid). Read more in [Codefresh runtime architecture](#codefresh-runtime-architecture). - - -{::nomarkdown} -
      -{:/} - -#### Codefresh Clients - -Codefresh Clients include the Codefresh UI and the Codefresh CLI. -The Codefresh UI provides a unified, enterprise-wide view of deployments (runtimes and clusters), and CI/CD operations (Delivery Pipelines, workflows, and deployments) in the same location. -The Codefresh CLI includes commands to install hybrid runtimes, add external clusters, and manage runtimes and clusters. - -### Codefresh runtime architecture -The sections that follow show detailed views of runtime architecture in the different installation environments, and descriptions of the Codefresh Runtime components. - -* [Hosted GitOps runtime architecture](#hosted-gitops-runtime-architecture) - In this installation environment, the Codefresh Runtime is installed on a _Codefresh-managed cluster_ in the Codefresh platform. -* Hybrid runtime architecture: - In this installation environment, the Codefresh Runtime is installed on a _customer-managed cluster_ in the customer environment. The Codefresh Runtime with or without ingress controllers: - * [Ingress controller](#ingress-controller-hybrid-runtime-architecture) - * [Ingress-less](#ingress-less-hybrid-runtime-architecture) -* Runtime components - * [Codefresh Application Proxy](#codefresh-application-proxy) - * [Argo Project](#argo-project) - * [Request Routing Service](#request-routing-service) - * [Tunnel Server](#codefresh-tunnel-server) - * [Tunnel Client](#codefresh-tunnel-client) - - -#### Hosted GitOps runtime architecture -In the hosted environment, the Codefresh Runtime is installed on a K8s cluster managed by Codefresh. - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-hosted.png" - url="/images/getting-started/architecture/arch-hosted.png" - alt="Hosted runtime architecture" - caption="Hosted runtime architecture" - max-width="100%" -%} - -#### Ingress controller hybrid runtime architecture -Runtimes with ingress use an ingress controller to control communication between the Codefresh Runtime in the customer cluster and the Codefresh Platform. Ingress controllers are optimal when the cluster with the Codefresh Runtime is exposed to the internet. - - - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-hybrid-ingress.png" - url="/images/getting-started/architecture/arch-hybrid-ingress.png" - alt="Ingress-based hybrid runtime architecture" - caption="Ingress-based hybrid runtime architecture" - max-width="100%" -%} - -#### Ingress-less hybrid runtime architecture -Ingress-less runtimes uses tunneling to control communication between the Codefresh Runtime in the customer cluster and the Codefresh Platform. Ingress-less runtimes are optimal when the cluster with the Codefresh Runtime is not exposed to the internet. - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-hybrid-ingressless.png" - url="/images/getting-started/architecture/arch-hybrid-ingressless.png" - alt="Ingress-less hybrid runtime architecture" - caption="Ingress-less hybrid runtime architecture" - max-width="100%" -%} - - - -#### Codefresh Application Proxy -The Codefresh Application Proxy (App-Proxy) functions as the Codefresh agent, and is deployed as a service in the Codefresh Runtime. -For hybrid runtimes with ingress, the App-Proxy is the single point-of-contact between the Codefresh Runtime, and the Codefresh Clients, the Codefresh Platform, and any organizational systems in the customer environment. 
-For ingress-less hybrid runtimes, the Tunnel Client forwards the incoming traffic from the Tunnel Server using internal reverse proxy to the App-Proxy. - -The App-Proxy: -* Accepts and serves requests from Codefresh Clients either via the Codefresh UI or CLI -* Retrieves a list of Git repositories for visualization in Codefresh Clients -* Retrieves permissions from the Codefresh Control Plane to authenticate and authorize users for the required operations. -* Implements commits for GitOps-controlled entities, such as Delivery Pipelines and other CI resources -* Implements state-change operations for non-GitOps controlled entities, such as terminating Argo Workflows - -{::nomarkdown} -
      -{:/} - -#### Argo Project - -The Argo Project includes: -* Argo CD for declarative continuous deployment -* Argo Rollouts for progressive delivery -* Argo Workflows as the workflow engine -* Argo Events for event-driven workflow automation framework - - -{::nomarkdown} -

      -{:/} - -#### Request Routing Service -The Request Routing Service is installed on the same cluster as the Codefresh Runtime in the customer environment. -It receives requests from the ingress controller (ingress) or the Tunnel Client (ingress-less), and forwards the request URLs to the Application Proxy, and webhooks directly to the Event Sources. - ->Important: - The Request Routing Service is available from runtime version 0.0.543 and higher. - Older runtime versions are not affected as there is complete backward compatibility, and the ingress controller continues to route incoming requests. - -#### Tunnel Server -Applies only to _ingress-less_ runtimes in hybrid installation environments. -The Codefresh Tunnel Server is installed in the Codefresh platform. It communicates with the enterprise cluster located behind a NAT or firewall. - -The Tunnel Server: -* Forwards traffic from Codefresh Clients to the client (customer) cluster. -* Manages the lifecycle of the Codefresh Tunnel Client. -* Authenticates requests from the Codefresh Tunnel Client to open tunneling connections. - -{::nomarkdown} -
      -{:/} - -#### Tunnel Client -Applies only to _ingress-less_ runtimes in hybrid installation environments. - -Installed on the same cluster as the Codefresh Runtime, the Codefresh Tunnel Client establishes the tunneling connection to the Codefresh Tunnel Server via the WebSocket Secure (WSS) protocol. -A single Codefresh Runtime can have a single Tunnel Client. - -The Codefresh Tunnel Client: -* Initiates the connection with the Codefresh Tunnel Server. -* Forwards the incoming traffic from the Tunnel Server through the Request Routing Service to App-Proxy, and other services. - -{::nomarkdown} -
      -{:/} - - -### Customer environment -The customer environment that communicates with the Codefresh Runtime and the Codefresh Platform, generally includes: -* Ingress controller for ingress hybrid runtimes - The ingress controller is configured on the same Kubernetes cluster as the Codefresh Runtime, and implements the ingress traffic rules for the Codefresh Runtime. - See [Ingress controller requirements]({{site.baseurl}}/docs/runtime/requirements/#ingress-controller). -* Managed clusters - Managed clusters are external clusters registered to provisioned hosted or hybrid runtimes for application deployment. - Hosted runtimes requires you to connect at least one external K8s cluster as part of setting up the Hosted GitOps environment. - Hybrid runtimes allow you to add external clusters after provisioning the runtimes. - See [Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/). -* Organizational systems - Organizational Systems include the customer's tracking, monitoring, notification, container registries, Git providers, and other systems. They can be entirely on-premises or in the public cloud. - Either the ingress controller (ingress hybrid environments), or the Tunnel Client (ingress-less hybrid environments), forwards incoming events to the Codefresh Application Proxy. - -### Related articles -[Set up a hosted runtime environment]({{site.baseurl}}/docs/runtime/hosted-runtime/) -[Install a hybrid runtime]({{site.baseurl}}/docs/runtime/installation/) - - - - diff --git a/_docs/getting-started/cd-codefresh.md b/_docs/getting-started/cd-codefresh.md new file mode 100644 index 000000000..2dc6a70fc --- /dev/null +++ b/_docs/getting-started/cd-codefresh.md @@ -0,0 +1,91 @@ +--- +title: "Codefresh for CD" +description: "Continuous deployment (CD) with Codefresh pipelines" +group: getting-started +toc: true +--- + +Work in progress TBD + + + + +## Connecting to Kubernetes +Continuous deployment starts with Kubernetes clusters, and Codefresh integrates with any known cluster provider for Kubernetes through a few simple steps. Connect your Google, Azure, Amazon Kubernetes cluster to Codefresh through simple integration steps. +For those Kubernetes clusters that are not in our list of cluster providers, you can manually enter your cluster settings to add any generic Kubernetes cluster. + +See [Connecting a Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster). + +## Deploying to Kubernetes +Codefresh offers a variety of options for you to choose from when deploying to Kubernetes. +Deploy to Kubernetes from the Codefresh UI, or programmatically through dedicated steps in pipelines, avoiding the need for `kubectl` commands. + +**On-demand deployment** +For quick and easy deployment, deploy on-demand from the Codefresh UI. + +**Dedicated steps in pipelines** +We have the `deploy` step, and the more advanced `cf-deploy-kubernetes`step that enables simple templating on Kubernetes manifests. + +Codefresh pipelines also support Kustomize and Helm for deployments through freestyle steps. + +Finally, if you are familiar with and want to work with `kubectl`, run your own custom `kubectl` commands in a freestyle step. Read more in [kubectl](#kubectl). + +See [Deployment options for Kubernetes]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/). + +## kubectl +`kubectl` is the command line interface for managing kubernetes clusters. 
Running custom `kubectl` commands in a freestyle step gives you maximum flexibility with cluster deployments.
+Codefresh automatically sets up your config context with your connected clusters. The kubeconfig location is stored in the `$CF_KUBECONFIG_PATH` variable, which expands to `/codefresh/volume/sensitive/.kube/config` within the shared step volume.
+
+Codefresh has a public Docker image for kubectl at [Docker Hub](https://hub.docker.com/r/codefresh/kubectl/tags){:target="\_blank"} that you can use.
+
+Because Codefresh automatically sets up your `kubeconfig` files with the information from your cluster integrations, you can modify the current config context and run any `kubectl` command you want applied to that context. For example, leverage the parallel capability of Codefresh pipelines to create two Docker images and deploy them to two different clusters with custom `kubectl` commands.
+
+See [Running custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/).
+
+## Helm and Codefresh
+Codefresh supplies a built-in Helm repository with every Codefresh account. In addition to public HTTP repositories, it also supports several private, authenticated Helm repositories.
+
+**Connect to Helm repositories**
+In addition to the official Helm repositories that are displayed with the Helm charts, Codefresh allows you to connect to any external Helm repository through simple integrations. You can then inject the Helm repository context into your pipelines by selecting the repository name.
+
+**Build Helm charts**
+Install Helm charts from Helm repositories, or build a new chart of your own.
+
+
+**Deploy Helm charts**
+Deploy the Helm chart to a Kubernetes cluster, a Helm repository, or both.
+
+
+## Dashboards
+
+TBD
 \ No newline at end of file
diff --git a/_docs/getting-started/ci-codefresh.md b/_docs/getting-started/ci-codefresh.md
new file mode 100644
index 000000000..6d0015b27
--- /dev/null
+++ b/_docs/getting-started/ci-codefresh.md
@@ -0,0 +1,98 @@
+---
+title: "Codefresh for CI"
+description: "Continuous integration (CI) with Codefresh pipelines"
+group: getting-started
+toc: true
+---
+
+
+
+Work in progress
+
+
+## Docker images
+Building a Docker image from the source code is probably the most common and basic requirement for a CI pipeline. In Codefresh you can build, push, and promote Docker images, using declarative YAML and credentials that are defined once and stored centrally.
+
+**Build and push image**
+Building a Dockerfile in a pipeline works in the same way as building the Dockerfile locally on your workstation. The `build` step in Codefresh enables you to build a Docker image in a completely declarative manner, and to automatically push it to your default Docker registry without any configuration.
+
+See:
+[Build and push Docker images]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/)
+
+
+**View image**
+The Images dashboard displays images from all registries connected to Codefresh. Every image is enriched with the Git branch, Git hash, commit message, and any tags defined for the image.
+
+See:
+[Viewing Docker images]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/#viewing-docker-images)
+
+
+
+**Promote image**
+Promote an image by copying it from one registry to another. You can promote images either from the Codefresh UI, or automatically from pipelines by specifying an existing image in the pipeline step.
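+
+A promotion from a pipeline could look like the sketch below — it assumes a second registry integration named `production-registry`, and uses a push step whose candidate is an existing image rather than the output of a build step:
+
+```yaml
+  promote_to_production:
+    title: Promoting an existing image
+    type: push
+    candidate: 'my-app-image:1.0.0'     # existing image to promote (placeholder)
+    tag: '1.0.0'
+    registry: production-registry       # assumed registry integration name
+```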
+
+See:
+[Promoting Docker images]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/#viewing-docker-images)
+
+
+## Code compilation
+TBD
+
+## Unit testing
+Codefresh supports all testing frameworks, including mocking frameworks, for all popular programming languages. Easily run unit tests on the source code of the application for every commit or pull request (PR) through our freestyle step in pipelines.
+
+Run any type of unit tests in Codefresh pipelines, from smoke tests in a Dockerfile, to tests with external or application images for simple applications, and even run them on a special testing image for complex applications.
+You can create test reports and view them whenever you need.
+
+See:
+[Run unit tests example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-unit-tests/)
+
+
+## Integration testing
+Compared to unit tests that run on the source code, integration tests run on the application itself. You need to launch either the application itself, or one or more external services such as a database.
+In Codefresh, you can launch the application and these sidecar containers within the pipeline through compositions and service containers.
+
+
+See:
+[Run integration tests example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/)
+
+## Security scanning
+Security scans are critical to deploying quality code. With Codefresh, you can also control when to run the security scan, and then view the scan results in the Codefresh UI, without having to go to the security platform.
+
+**Security scan platforms**
+Codefresh can integrate with any security scanning platform that scans source code or Docker images for vulnerabilities. We already have ready-to-use Docker images for several security platforms such as Anchore, Aqua Security, Clair, Twistlock, and WhiteSource. For the full list, visit our [Plug-ins library](https://codefresh.io/steps/){:target="\_blank"}.
+
+**Scan timing in pipeline step**
+The security scan is implemented through a freestyle step, inserted anywhere in the pipeline. The fact that you can insert the step anywhere allows you to control when the scan is executed, for example, before the source code is packaged in a container, before the container is stored in a registry or deployed to production, or any combination of these.
+
+**View scan results**
+As with any scan, the final step is viewing the scan results. Make the scan results available in Codefresh release dashboards (Test Report button) by attaching analysis reports to the pipeline build.
+
+**Security annotations**
+Correlate the Docker images in Codefresh with the results of the security scanning platform by adding annotations for custom metadata. For example, you can add annotations such as the number of issues or the URL of the full report.
+
+See:
+[Security scanning tests]({{site.baseurl}}/docs/testing/security-scanning/)
+[Test reporting modes]({{site.baseurl}}/docs/testing/test-reports/)
+[Metadata in Docker images]({{site.baseurl}}/docs/pipelines/docker-image-metadata/)
+
+
+
+## Code quality coverage
+Good quality code is central to any CI platform or tool. Codefresh integrates with the top code quality platforms/tools in the market to track code coverage, inspect code quality, and generate code-coverage analysis reports.
+
+Three steps to get started:
+* Set up integrations with the platforms/tools (Coveralls, SonarQube, Codecov, for example).
+
+* Copy and paste the ready-to-use step for your platform/tool into your pipeline from our [Plug-ins library](https://codefresh.io/steps/){:target="\_blank"}.
+* Reference them by name in the pipeline step, and view the updated reports in the respective UIs.
+
+See:
+[Code coverage examples]({{site.baseurl}}/docs/example-catalog/examples/#code-coverage-examples)
+
+
diff --git a/_docs/getting-started/concepts.md b/_docs/getting-started/concepts.md
new file mode 100644
index 000000000..21943d500
--- /dev/null
+++ b/_docs/getting-started/concepts.md
@@ -0,0 +1,113 @@
+---
+title: "Concepts in Codefresh"
+description: "Understand terminology and nuances in Codefresh"
+group: getting-started
+toc: true
+---
+Work in progress
+
+
+### Runtime
+A Runtime in Codefresh is a GitOps installation in your Codefresh account, in either a Hosted or Hybrid installation environment. Hosted Runtimes are installed on a Codefresh cluster and managed by Codefresh. Hybrid Runtimes are installed on customer clusters, and managed by the customers.
+You can install a single Hosted Runtime, and multiple Hybrid Runtimes, in a Codefresh account.
+
+
+
+A single Runtime can connect to and manage multiple remote clusters.
+
+
+See:
+[GitOps runtime architecture]({{site.baseurl}}/docs/installation/runtime-architecture)
+[Hybrid GitOps Runtime installation]({{site.baseurl}}/docs/installation/gitops/hybrid-gitops)
+[Hosted GitOps Runtime installation]({{site.baseurl}}/docs/installation/gitops/hosted-runtime)
+
+
+
+### Runner
+The Runner is the hybrid installation option for CI/CD pipelines in your Codefresh account. The Runner is installed as a Kubernetes-native application on any Kubernetes-compliant cluster. It allows you to run pipelines on your own Kubernetes cluster, including private clusters behind company firewalls.
+
+The Codefresh Runner gives you:
+* Access to secure services (such as Git repositories or databases) that are behind the firewall and normally not accessible to the public cloud.
+* The ability to use special resources in your Codefresh pipeline that are unique to your application, such as GPU nodes or other special hardware present only in your data center.
+* Complete control over the build environment, in addition to the resources available to pipelines.
+
+Every Runner installation creates a runtime environment in your account. Assign the Runner to any pipeline to automatically run the pipeline in your own cluster. External integrations (such as Docker registries or Helm repositories) are also available to the Runner, making pipelines behave exactly the same regardless of their runtime environment.
+
+You can have multiple Runner installations in the same Codefresh account. A Runner can also manage multiple remote clusters in your account.
+
+See:
+[Codefresh Runner installation]({{site.baseurl}}/docs/installation/codefresh-runner)
+[Runner installation behind firewalls]({{site.baseurl}}/docs/reference/behind-the-firewall)
+
+
+### Project
+A project is a top-level entity in Codefresh for grouping related pipelines. Projects can group pipelines according to any criteria relevant to your enterprise. The criteria can be logical, for example based on teams, departments, or locations, or functional, based on the microservices in your applications.
+Projects centralize viewing and configuration settings for the pipelines that belong to them:
+* Selecting a pipeline shows the other pipelines in the same project.
+
+* Access control and user-defined variables defined for the project are inherited by all the pipelines assigned to the project.
+
+There are no limits to the number of projects you can create in your account. You can also create standalone pipelines and assign them later to a project, or detach a pipeline assigned to a project.
+
+See:
+[Projects in pipelines]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-concepts)
+
+### Pipeline
+The pipeline is the central component in Codefresh that implements CI/CD processes. Everything for CI/CD in Codefresh starts and ends with pipelines. A pipeline can do only CI, only CD, both CI and CD, or run any custom action, such as unit and integration tests.
+
+A CI pipeline can compile and package code, and build and push Docker images. A CD pipeline can deploy applications/artifacts to VMs, Kubernetes clusters, FTP sites, S3 buckets, and more. And a CI/CD pipeline can combine code compilation, integration, and deployment for full CI/CD.
+
+More to be added...
+
+
+
+
+### Applications
+An application is a deployment to one or more Kubernetes or Kubernetes-compatible clusters.
+Codefresh supports two types of applications:
+* Containerized applications packaged as Docker images
+* Argo CD applications
+
+**Containerized applications**
+Containerized applications are compiled, packaged, and deployed through Codefresh pipelines. Codefresh has native support for Docker artifacts, and also supports non-Dockerized applications that don't use a Dockerfile for the actual build.
+
+Deploy an application directly to Kubernetes through the Codefresh UI, or use Helm as a package manager to deploy to Kubernetes, again from Codefresh.
+Codefresh offers several levels of visibility into your deployments:
+* The Kubernetes dashboard displays the status of pods and Docker images.
+* The Helm dashboard displays the applications deployed to the cluster through Helm packages.
+* The Environment dashboard displays both Helm and Kubernetes releases, the status of the cluster, and most importantly that of the builds that affect it.
+
+See:
+
+
+**Argo CD applications**
+Argo CD applications conform to Argo CD's application definition CRD (Custom Resource Definition). Argo CD supports several types of Kubernetes manifests, including Jsonnet, Kustomize applications, Helm charts, and YAML/JSON files, and supports webhook notifications from Git.
+
+Create Argo CD applications that are fully GitOps-compliant from the Codefresh UI. Work in form mode or directly in YAML in the Create Application wizard. Built-in validation makes it easy to identify and fix errors before commit. The application manifest is generated, committed to Git, and synced to your cluster.
+After creation, you can edit and optimize the application.
+
+Just as with Dockerized applications, you get full visibility into the applications and their deployments through the global analytics, DORA metrics, and Applications dashboards. The Applications dashboard displays the individual deployments across your enterprise. Drill down shows the current state of all the resources in the application, with actions and detailed information for each resource.
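+
+For reference, below is a minimal sketch of such an Argo CD `Application` manifest; the repository URL, path, and destination namespace are placeholders:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: demo-app
+  namespace: argocd
+spec:
+  project: default
+  source:
+    repoURL: https://github.com/example/demo-app-manifests  # placeholder Git repository
+    targetRevision: HEAD
+    path: k8s                                               # folder containing the Kubernetes manifests
+  destination:
+    server: https://kubernetes.default.svc
+    namespace: demo
+  syncPolicy:
+    automated:
+      prune: true
+      selfHeal: true
+```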
+ + + +### Triggers +TBD + +### Events +TBD + diff --git a/_docs/getting-started/csdp-introduction.md b/_docs/getting-started/csdp-introduction.md deleted file mode 100644 index 6d48105f0..000000000 --- a/_docs/getting-started/csdp-introduction.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: "Introducing Codefresh" -description: "" -group: getting-started -toc: true ---- - -Codefresh is a full-featured, turn-key solution for application deployments and releases. Powered by Argo, Codefresh uses Argo CD, Argo Workflows, Argo Events, and Argo Rollouts, extended with unique functionality and features essential for enterprise deployments. - -Codefresh offers security, maintainability, traceability, and most importantly, a single control plane for all stakeholders, be they developers, operators, product owners or project managers. - -With Codefresh, you can: - -* Deliver software at scale by managing hundreds or thousands of deployment targets and applications -* Get a secure, enterprise-ready distribution of Argo with built-in identity, RBAC (role-based access control), and secrets -* Gain clear visibility across all deployments and trace changes and regressions from code to cloud in seconds -* Get enterprise-level dedicated support for Argo deployments -* Get insights into every aspect of your CI/CD with smart dashboards -* Manage multiple runtimes and multiple clusters in a single pane of glass - - -### Codefresh deployment models - -Codefresh supports hosted and hybrid deployments: - -* **Hosted** deployment or Hosted GitOps, a hosted and managed version of Argo CD. The SaaS version of Codefresh, the runtime is hosted on a Codefresh cluster (easy setup) and managed by Codefresh (zero maintenance overhead). -Click once to provision the hosted runtime, and start deploying applications to clusters without having to install and maintain Argo CD. - - -* **Hybrid** deployment, with the runtime hosted on the customer cluster and managed by the customer. -The hybrid offering retains runtimes within the customer infrastructure while giving you the power of Argo CD with Codefresh's CI and CD tools, to help achieve continuous integration and continuous delivery goals. - -For details, see [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture). - - -### Codefresh and open source Argo -Codefresh brings the power of the Argo project to your Kubernetes deployments: - -* Argo CD for declarative continuous deployment -* Argo Rollouts for progressive delivery -* Argo Workflows as the workflow engine -* Argo Events for event-driven workflow automation framework - -Codefresh creates a conformed fork of the Argo project, providing an enterprise-supported version of the same, enhanced with unique functionality. - - - -### Codefresh and GitOps -Codefresh is GitOps-centric, and supports GitOps from the ground up. Codefresh leverages Argo components to have the entire desired state applied from Git to your Kubernetes cluster, and then reported back to Codefresh. -In addition: - -* Every state change operation in Codefresh is made via Git -* Codefresh audit log is derived from the Git changelog -* Codefresh access control is derived from Git permissions - -For details, see [entity model]({{site.baseurl}}/docs/getting-started/entity-model) and [access control]({{site.baseurl}}/docs/administration/access-control). 
- - -### Insights in Codefresh -Codefresh makes it easy to both access and visualize critical information for any CI/CD resource at any stage, at any level, and for anyone, from managers to DevOps engineers. - -{::nomarkdown} -
      - {:/} - -#### Global deployment analytics - -The Home dashboard presents system-wide highlights in real-time, making it an ideal tool for management. -Get insights into important KPIs for entities across runtimes and clusters, in the same location. View status of runtimes and managed clusters, deployments, failed deployments with rollbacks, most active applications, and Delivery Pipelines. - -{% include - image.html - lightbox="true" - file="/images/incubation/home-dashboard.png" - url="/images/incubation/home-dashboard.png" - alt="Global deployment analytics" - caption="Global deployment analytics" - max-width="70%" -%} - -{::nomarkdown} -
      - {:/} - -#### DORA metrics - -DORA metrics has become integral to enterprises wanting to quantify DevOps performance, and Codefresh has out-of-the-box support for it. - - -Apart from the metrics themselves, the DORA dashboard in Codefresh has several features such as the Totals bar with key metrics, filters that allow you to pinpoint just which applications or runtimes are contributing to problematic metrics, and the ability to set a different view granularity for each DORA metric. - -See [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/). - -{% include - image.html - lightbox="true" - file="/images/incubation/intro-dora-metrics.png" - url="/images/incubation/intro-dora-metrics.png" - alt="DORA metrics" - caption="DORA metrics" - max-width="60%" -%} - -{::nomarkdown} -
      - {:/} - -#### Application analytics and analysis - -The Applications dashboard displays a unified view of applications across runtimes and clusters. No matter what the volume and frequency of your deployments, the Applications dashboard makes it easy to track them. Search for Jira issues, commit messages, committers, and see exactly when and if the change was applied to a specific application. - -See [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/). - -{::nomarkdown} -
      - {:/} - -#### Delivery Pipelines -The Delivery Pipelines dashboard displays aggregated performance analytics based on the pipeline’s workflows, including step analytics across all the workflows in the pipeline. - -{::nomarkdown} -
      - {:/} - -#### Workflows -View and monitor submitted workflows across all pipelines in the Workflows dashboard. Select a time range, or view up to fifty of the most recent workflows for all the pipelines in the runtime. Drill down to any workflow for further analysis. -{::nomarkdown} -
      - {:/} - -### CI/CD resources in Codefresh -Wizards make it easy to create delivery pipelines and applications. Smart views and options make it easier to monitor and manage them. -{::nomarkdown} -

      - {:/} - -#### Delivery Pipelines - -Delivery Pipelines are where the CI magic happens in Codefresh. Our pipeline creation wizard removes the complexity from creating, validating, and maintaining pipelines. Every stage has multi-layered views of all the related Git change information for the pipeline. -See [Create delivery pipelines]({{site.baseurl}}/docs/pipelines/create-pipeline/). - -{::nomarkdown} -
      - {:/} - -#### Workflows -Drill down into a workflow to visualize the connections between the steps in the workflow. -A unique feature is the incorporation of Argo Events into the workflow visualization. You get a unified view of Argo Events and Argo Workflows in the same location, the events that triggered the workflow combined with the workflow itself. - -{::nomarkdown} -
      - {:/} - -#### Workflow Templates -Select from ready-to-use Workflow Templates in the Codefresh Hub for Argo or create your own custom template. The **Run** option allows you to test a new Workflow Template, or changes to an existing template, without needing to first commit the changes. - - {% include - image.html - lightbox="true" - file="/images/whats-new/wrkflow-template-main.png" - url="/images/whats-new/wrkflow-template-main.png" - alt="Workflow Templates" - caption="Workflow Templates" - max-width="70%" - %} - -{::nomarkdown} -
      - {:/} - -#### Applications -Create GitOps-compliant applications, and manage the application lifecycle in the Codefresh UI. - -Define all application settings in a single location through the intuitive Form mode or directly in YAML, and commit all changes to Git. -For easy access, after commit, the configuration settings are available in the Applications dashboard along with the deployment and resource information. - -See [Applications]({{site.baseurl}}/docs/deployment/create-application/). - -{% include - image.html - lightbox="true" - file="/images/applications/add-app-general-settings.png" - url="/images/applications/add-app-general-settings.png" - alt="Application creation in Codefresh" - caption="Application creation in Codefresh" - max-width="60%" -%} - -### GitOps CI integrations - -If you have Hosted GitOps, and your own CI tools for pipelines and workflows, enrich your deployments with CI information without disrupting existing processes. -Simply connect your CI tools to Codefresh, and our new report image template retrieves the information. For example, add the report image step in your GitHub Actions pipeline and reference the different integrations for Codefresh to retrieve and enrich the image with Jira ticket information. - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -{% include - image.html - lightbox="true" - file="/images/incubation/github-action-int-settings.png" - url="/images/incubation/github-action-int-settings.png" - alt="Image enrichment with GitHub Actions integration" - caption="Image enrichment with GitHub Actions integration" - max-width="60%" -%} - - -### What to read next -[Quick start tutorials]({{site.baseurl}}/docs/getting-started/quick-start) \ No newline at end of file diff --git a/_docs/getting-started/entity-model.md b/_docs/getting-started/entity-model.md deleted file mode 100644 index 302940c17..000000000 --- a/_docs/getting-started/entity-model.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Entity model" -description: "" -group: getting-started -toc: true ---- - -The Codefresh entity model is derived from these entity types: -* Codefresh account/user management entities -* Argo ecosystem entities -* Workflow, runtime, and Git Source entities -* Codefresh-specific entities such as pipelines, images, and applications - - - -### Codefresh account/user management entities -The account/user management entity types includes entities that do not share a direct relationship to the Codefresh domain. These are enterprise-specific entities in standard SAAS solutions. - -#### Account -Every user who signs in to Codefresh gets a private administrator user account. - -If you received an invitation to Codefresh, instead of a private user account, you are added as a collaborator to the main account. Your permissions are based on those explicitly assigned to you. - -The number of collaborators in an account is defined by the current plan associated with it. - -#### User -A user in Codefresh is one who has completed the sign-up process, and can log in using authorized third-party systems such as: -* GitHub -* Bitbucket -* GitLab -* Azure -* Google - -> If you configure SSO (Single Sign-On) for the account, the user can log in using only the configured SSO. - -#### Billing -For details, please contact [Sales](mailto:sales@codefresh.io?subject=[Codefresh] Codefresh billing inquiry). - -#### Single Sign-On (SSO) -Enterprise accounts can configure SSO. 
For details, see [Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/administration/single-sign-on/). - -#### Security configuration -Security settings include: -* Inactivity timeout per collaborator account -* Domain restriction for invitations - -### Argo ecosystem entities -Codefresh is built on top of the successful open source Argo project, and as such, supports all the native Argo project-entities. -You can apply every supported entity that exists in the open source projects to your Codefresh account. - -### Workflow -Codefresh shows all the workflows executed with Argo Workflows. -Workflows with pipelines display links to the pipelines. Users can terminate or retry a workflow, and view its logs. - -### Runtime -A runtime represents an installation of Codefresh on the customer's K8s cluster, and contains all the components required to perform all tasks on the cluster. - -Review [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/), and [runtime installation ]({{site.baseurl}}/docs/runtime/installation/). - -### Git Source -A Git Source is a link to a Git repository that stores GitOps-controlled entities. You can create as many as Git Sources as you require. - -To understand how to control Git Sources using GitOps, see [access control]({{site.baseurl}}/docs/administration/access-control/). - -### Codefresh high-level entities -Codefresh creates high-level views that better represents, abstracts, and connects all the different entities in the Argo ecosystem. - -#### CI/CD pipeline -A pipeline is a Codefresh-representation of Argo Events, comprising an Argo Events Sensor and Argo Events Triggers. Every trigger in a sensor becomes a different pipeline in Codefresh. The same sensor can be associated with multiple pipelines, if it has different trigger conditions. - -A pipeline links to the following Argo Events entities: -* Sensor -* Event Source -* Workflow Template (or a cluster-level Workflow Template) - -A pipeline also shows all the workflows created from the triggered event associated with that pipeline. - -#### Image -An image represents a built artifact of a Docker image, reported to Codefresh using a dedicated interface. - -Users can use a predefined [Argo Workflow Template](https://codefresh.io/argohub/workflow-template/codefresh-csdp) to help with transferring the image information to Codefresh. - -#### Application -A holistic view of all your Argo CD and Argo Rollouts deployments that link to the underlying artifacts and workflows. diff --git a/_docs/getting-started/faq.md b/_docs/getting-started/faq.md deleted file mode 100644 index bd189aab4..000000000 --- a/_docs/getting-started/faq.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Frequently asked questions" -description: "" -group: getting-started ---- -We have collected a few of the common questions on the Codefresh solution. - -For questions on Codefresh Classic, navigate to our [FAQs for Codefresh Classic](https://codefresh.io/docs/docs/getting-started/faq/){:target="\_blank"}. - - -**Q. What is the Codefresh platform?** - -A. The Codefresh platform is a full-featured, turn-key solution for application deployments and releases. Powered by the Argo Project, Codefresh uses Argo CD, Argo Workflows, Argo Events, and Argo Rollouts, extended with unique functionality and features essential for enterprise deployments. - -**Q. Which deployment environments does Codefresh support?** - -A. The current release of Codefresh supports hosted and hybrid deployment environments. 
Stay tuned for our announcement on support for on-premises deployments. - -**Q. How does Codefresh relate to Open Source Argo?** - -A. Codefresh creates a conformed fork of the Argo Project. You get an enterprise-supported version of the Argo Project comprising Argo Workflows, Argo Events, Argo CD, and Argo Rollouts. You can take advantage of the Argo Project offering, with the extended functionality that Codefresh brings to it. - -**Q. I already have a Kubernetes cluster with Argo CD. Can I install Codefresh on the same cluster?** - -A. Hybrid runtimes must be installed on a clean Kubernetes cluster without any Argo Project components. Because we create a conformed fork of the Argo Project in Codefresh, installing it on a cluster with Argo components creates a conflict that will cause the installation to fail. - -**Q. I have resources on my Kubernetes cluster that I want to use in Codefresh. What should I do?** - -A. We will be giving detailed instructions on migrating resources from Kubernetes clusters to Codefresh-based Kubernetes clusters. - -**Q. Does Codefresh support all Git providers?** -A. At the time of writing, Codefresh supports GitHub. We are working to quickly extend support to GitLab and Bitbucket. Stay tuned. - -**Q. What are the browser requirements for the Codefresh UI?** - -A. Officially, we support the latest version of the Chrome browser. Any browser released in the last couple of years should work without major issues. -The following browser versions are **NOT** supported: - -{: .table .table-bordered .table-hover} -| Browser | Version | Date released | -| -------------- | ---------------------------- |-------------------------| -| Chrome | < 51 | May 2016 | -| Firefox | < 54 | Jun 2017 | -| Edge | < 14 | Aug 2016 | -| Safari | < 10 | Sep 2016 | - - -## Migration from Codefresh Classic - -**Q. I have Codefresh Classic. Can I migrate to Codefresh?** -A. At the time of writing, we are working on making the migration from Codefresh Classic to Codefresh as seamless as possible. Stay tuned for the migration announcement. - diff --git a/_docs/getting-started/gitops-codefresh.md b/_docs/getting-started/gitops-codefresh.md new file mode 100644 index 000000000..059a2136e --- /dev/null +++ b/_docs/getting-started/gitops-codefresh.md @@ -0,0 +1,8 @@ +--- +title: "Codefresh for GitOps" +description: "Argo CD with Codefresh GitOps" +group: getting-started +toc: true +--- + +Work in progress TBD \ No newline at end of file diff --git a/_docs/getting-started/gitops.md b/_docs/getting-started/gitops.md deleted file mode 100644 index d7542fbbb..000000000 --- a/_docs/getting-started/gitops.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "GitOps approach" -description: "" -group: getting-started -toc: true ---- - -> In the documentation, Kubernetes and K8s are used interchangeably. - -### GitOps - -The Codefresh platform is built entirely around the concept of GitOps, a set of best practices where the entire code delivery process is controlled via Git, including infrastructure and application definition, and automation to complete updates and rollbacks. - -To fully understand the benefits of Codefresh, let's briefly recap GitOps, and how it can help: - -#### Infrastructure as code, the entire system described declaratively - Infrastructure as code is a modern approach that "declaratively" describes the state of a system as code, while having that single source of truth applied to an end-system. The end-systems in most cases are modern cloud native tools. 
- - Declarative means that configuration is guaranteed by a set of facts, instead of by a set of instructions. With your end system's declarations versioned in Git, you have a single source of truth. You can then both easily deploy and roll back your end system according to the state changes in Git. And more important, if and when disaster strikes, you can also reproduce your cluster’s infrastructure reliably and quickly. - - GitOps is just a specific case of infrastructure as code where the end system is a Kubernetes cluster. - -#### Desired system state versioned in Git - With the declaration of your system stored in a version control system, and serving as your canonical source of truth, you have a single place from which everything is derived and driven. Now not only your application code is in Git, but also all the information required to install and manage your application, including service definition, deployment information, and more. - - Developers can continue with the familiar and convenient approaches they are already using for their applicative code. In addition, Git makes complicated tasks like collaboration (via pull requests), security (via signed commits), permissions (repository permissions), and rollback, as trivial as they can get. - - -#### Use dedicated tools to implement transfer of desired state into the end system - Once the state of your end-system is declared and kept under version control, you need a tool and process to apply the updated desired state into the end system. - - One of the tools for implementing infrastructure as code in the realm of DevOps is [Terraform](https://www.terraform.io/), for example. - - While you can implement GitOps (infrastructure as code for Kubernetes), using a battle-ready tool like Terraform which has a plugin system that also supports Kubernetes, K8s has many nuances that differ from a traditional sync process to a cloud system or some other standard REST API end system. - - To address the specific use cases of Kubernetes, there are new tools dedicated to implementing GitOps (infrastructure as code for k8s), such as [ArgoCD](https://github.com/argoproj/argo-cd). - - diff --git a/_docs/getting-started/intro-to-codefresh.md b/_docs/getting-started/intro-to-codefresh.md new file mode 100644 index 000000000..850aad11c --- /dev/null +++ b/_docs/getting-started/intro-to-codefresh.md @@ -0,0 +1,113 @@ +--- +title: "Introduction to Codefresh" +description: "What is Codefresh?" +group: getting-started +toc: true +--- + +TBD + + + \ No newline at end of file diff --git a/_docs/getting-started/main-concepts.md b/_docs/getting-started/main-concepts.md deleted file mode 100644 index c2d4f4180..000000000 --- a/_docs/getting-started/main-concepts.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Main concepts" -description: "" -group: getting-started -toc: true ---- - -### Built on top of the open source Argo -Codefresh maintains a [conformed](https://github.com/argoproj/argo-conformance-program) fork of the following Argo components, providing an enterprise-supported version of them: -* [Argo CD](https://github.com/argoproj/argo-cd): Declarative continuous deployment for Kubernetes. -* [Argo Rollouts](https://argoproj.github.io/argo-rollouts/): Progressive Delivery for Kubernetes. -* [Argo Workflows](https://github.com/argoproj/argo-workflows): Workflow engine for Kubernetes. -* [Argo Events](https://github.com/argoproj/argo-events): Event-driven workflow automation framework. 
- -For details, see [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/). - -### Hybrid behind firewall model -Codefresh performs an installation, called a Runtime, on the user's K8s cluster. The Runtime contains all required components for the Codefresh experience. - -For details, see [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/). - -### GitOps native approach -Codefresh is built entirely on the heavily-adopted concept of GitOps. Read the detailed explanation on our [GitOps approach]({{site.baseurl}}/docs/getting-started/gitops/).
      -Codefresh leverages Argo components (Argo CD and Argo Events), to have the entire desired state applied from Git to the user's K8s cluster, and also reported back to Codefresh platform. - -### Every state change operation in Codefresh is made via Git -Codefresh has taken the GitOps approach a step forward by making our entire entity model fully controlled by GitOps via Codefresh, meaning that the entire state of your account is maintained in Git. For details, see [entity model]({{site.baseurl}}/docs/getting-started/entity-model/). - -Codefresh provides a full front-end experience powered by a strong API layer (GraphQL), and every state change (via GraphQL mutation) actually performs a commit on behalf of the user to Git. - -### Audit log derived from Git changelog -Codefresh has built its sophisticated but simple audit log on all operations to the system, for both the Git change and the log of API calls that have been made to the system. -For details, see [audit]({{site.baseurl}}/docs/administration/audit/). - -### Access control derived from Git permissions -Codefresh has built its sophisticated but simple access control model on top of the existing Git operations that are defined externally to the system.
      -For details, see [access control]({{site.baseurl}}/docs/administration/access-control/). diff --git a/_docs/integrations/ci-integrations.md b/_docs/gitops-integrations/ci-integrations.md similarity index 89% rename from _docs/integrations/ci-integrations.md rename to _docs/gitops-integrations/ci-integrations.md index f43ad8e40..f1779d19b 100644 --- a/_docs/integrations/ci-integrations.md +++ b/_docs/gitops-integrations/ci-integrations.md @@ -1,18 +1,18 @@ --- -title: "CI integrations" +title: "GitOps CI integrations" description: "" -group: integrations +group: gitops-integrations toc: true --- -Use Codefresh's Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. +Use Codefresh Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. You can connect a third-party CI solution to Codefresh, such as GitHub Actions for example, to take care of common CI tasks such as building/testing/scanning source code, and have Codefresh Hosted GitOps still responsible for the deployment, including image enrichment and reporting. The integration brings in all the CI information to your images which you can see in the Images dashboard. -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). +See [Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/). -### Codefresh image reporting and enrichment action +## Codefresh image reporting and enrichment action To support the integration between Codefresh and third-party CI platforms and tools, we have created dedicated actions for supported CI tools in the Codefresh Marketplace. These actions combine image enrichment and reporting through integrations with issue tracking and container registry tools. >You can also configure the integration directly in the Codefresh UI, as described in [Connect a third-party CI platform/tool to Codefresh](#connect-a-third-party-ci-platformtool-to-codefresh). @@ -29,17 +29,17 @@ Use the action as follows: 1. When the pipeline completes execution, Codefresh retrieves the information on the image that was built and its metadata through the integration names specified (essentially the same data that Codefresh CI would send automatically). 1. View the image in Codefresh's [Images dashboard]({{site.baseurl}}/docs/deployment/images/), and in any [application]({{site.baseurl}}/docs/deployment/applications-dashboard/) in which it is used. -### Connect a third-party CI platform/tool to Codefresh -Connecting the CI platform/tool to Codefresh from the UI includes configuring the required arguments, and then generating and copying the YAML manifest for the report image to your pipeline. +## Connect a third-party CI platform/tool to GitOps +Connecting the CI platform/tool to GitOps from the UI includes configuring the required arguments, and then generating and copying the YAML manifest for the report image to your pipeline. -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**GitOps Integrations**](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. 1. Filter by **CI tools**, then select the CI tool and click **Add**. 1. 
Define the arguments for the CI tool: - [Codefresh Classic]({{site.baseurl}}/docs/integrations/ci-integrations/codefresh-classic/) - [GitHub Action]({{site.baseurl}}/docs/integrations/ci-integrations/github-actions/) - [Jenkins]({{site.baseurl}}/docs/integrations/ci-integrations/jenkins/) + [Codefresh Classic]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/codefresh-classic/) + [GitHub Action]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/github-actions/) + [Jenkins]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/jenkins/) - For the complete list of arguments you can use, see [CI integration argument reference](#ci-integration-argument-reference) in this article. + For the complete list of arguments you can use, see [CI integration for GitOps argument reference](#ci-integration-argument-reference) in this article. 1. To generate a YAML snippet with the arguments, on the top-right, click **Generate Manifest**. Codefresh validates the generated manifest, and alerts you to undefined arguments that are required, and other errors. @@ -79,9 +79,9 @@ The table describes _all_ the arguments required for CI integrations in general. | `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | | `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | | `CF_API_KEY` | The API key for authentication. Generate the key for the integration. | Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. See [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). | Optional | +| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. See [Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). | Optional | | `CF_JIRA_INTEGRATION` | _Deprecated from version 0.0.565 and higher._ Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. | _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use for image enrichment. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | +| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use for image enrichment. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/jira/). | Optional | | `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | | `CF_WORKFLOW_NAME` | The name assigned to the workflow that builds the image. When defined, the name is displayed in the Codefresh platform. Example, `Staging step` | Optional | | `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. 
| Required | @@ -96,9 +96,9 @@ The table describes _all_ the arguments required for CI integrations in general. | `CF_JIRA_MESSAGE` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira issue IDs matching the string to associate with the image. | Required | | `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | -### Related articles -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue tracking intergrations]({{site.baseurl}}/docs/integrations/issue-tracking/) +## Related articles +[Container registry GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/) +[Issue tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/ci-integrations/codefresh-classic.md b/_docs/gitops-integrations/ci-integrations/codefresh-classic.md similarity index 83% rename from _docs/integrations/ci-integrations/codefresh-classic.md rename to _docs/gitops-integrations/ci-integrations/codefresh-classic.md index e15ab68ec..73ab5e94a 100644 --- a/_docs/integrations/ci-integrations/codefresh-classic.md +++ b/_docs/gitops-integrations/ci-integrations/codefresh-classic.md @@ -1,16 +1,16 @@ --- -title: "Codefresh Classic" +title: "Codefresh CI pipeline GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: ci-integrations toc: true --- - Use Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. Codefresh Classic is one of the third-party CI platform/tools that you can connect to Codefresh for deployment with image enrichment and reporting. + Use Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. If you have Hosted or Hybrid GitOps, you can connect your CI pipelines to Hosted GitOps for deployment with image enrichment and reporting. - Connecting Codefresh Classic, adds the CI information to images which are displayed in the Images dashboard, as in the example below. + Connecting your CI pipeline adds the CI information to images, which are displayed in the Images dashboard, as in the example below. {% include image.html @@ -24,53 +24,65 @@ -For information on how to use the image reporting action in your Codefresh Classic pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). +For information on how to use the image reporting action in your Codefresh pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/). -### Example of Codefresh Classic pipeline with report image step +## Example of Codefresh pipeline with report image step {% highlight yaml %} {% raw %} -reportImage: - title: Report image to Codefresh CD - type: codefresh-report-image - working_directory: /code - arguments: - # The URL to the cluster with the Codefresh runtime to integrate with. - CF_HOST: '[runtime-host-url]' - - # Codefresh API key !! Committing a plain text token is a security risk. We highly recommend using encrypted secrets !! 
- # Documentation - https://codefresh.io/docs/docs/configure-ci-cd-pipeline/secrets-store/ - CF_API_KEY: ${{API_KEY}} - - # Image path to enrich - CF_IMAGE: '[full image path here, including tag]' - - # Name of Container registry integration - CF_CONTAINER_REGISTRY_INTEGRATION: 'v2' - - # The git branch which is related for the commit - CF_GIT_BRANCH: '[name-of-your-git-branch]' - - # Name of Jira integration - CF_JIRA_INTEGRATION: 'jira' - - # Jira project filter - CF_JIRA_PROJECT_PREFIX: '[jira-project-prefix]' - - # String starting with the issue ID to associate with image - CF_JIRA_MESSAGE: '[issue-id]' +version: "1.0" +stages: + - "clone" + - "build" + - "report" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}" + revision: "${{CF_BRANCH}}" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "${{CF_REPO_OWNER}}/color" + working_directory: "${{clone}}" + tag: "${{CF_SHORT_REVISION}}" + dockerfile: "Dockerfile" + registry: docker-lr + stage: "build" + + ReportImageMetadataAll: + title: Report image to Codefresh CD + type: codefresh-report-image + working_directory: /code + stage: "report" + arguments: + CF_API_KEY: '${{CF_API_KEY}}' + CF_IMAGE: 'docker.io/${{CF_REPO_OWNER}}/color:${{CF_SHORT_REVISION}}' + CF_CONTAINER_REGISTRY_INTEGRATION: docker + CF_RUNTIME_NAME: "codefresh-hosted" + CF_GITHUB_TOKEN: '${{GITHUB_TOKEN}}' + CF_GIT_PROVIDER: github + CF_GIT_REPO: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + CF_GIT_BRANCH: '${{CF_BRANCH}}' + CF_ISSUE_TRACKING_INTEGRATION: jira + CF_JIRA_MESSAGE: "${{CF_COMMIT_MESSAGE}}" + CF_JIRA_PROJECT_PREFIX: CR {% endraw %} {% endhighlight yaml %} -### Codefresh Classic-Codefresh integration arguments +## CI pipeline-GitOps integration settings The table describes the arguments required to connect Codefresh Classic to Codefresh. >Except for Git branch and Git repo which are required, you can omit other Git provider arguments. Codefresh retrieves the required values from the runtime context selected for the integration. -For the complete argument reference, see [CI integration argument reference]({{site.baseurl}}/docs/integrations/ci-integrations/#ci-integration-argument-reference). +For the complete argument reference, see [CI integration for GitOps argument reference]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/#ci-integration-argument-reference). {: .table .table-bordered .table-hover} @@ -79,9 +91,9 @@ For the complete argument reference, see [CI integration argument reference]({{s | `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | | `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | | `CF_API_KEY` | The API key to authenticate the Codefresh Classic user to Codefresh. Generate the key for the integration. | Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. To create a container registry integration if you don't have one, click **Create Container Registry Integration**, and then configure the settings. See [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). | Optional | +| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. 
To create a container registry integration if you don't have one, click **Create Container Registry Integration**, and then configure the settings. See [Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). | Optional | | `CF_JIRA_INTEGRATION` | Deprecated from version 0.0.565. Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. | _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | +| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/jira/). | Optional | | `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | | `CF_WORKFLOW_NAME` | The name assigned to the workflow that builds the image. When defined, the name is displayed in the Codefresh platform. Example, `Staging step` | Optional | | `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. | Required | @@ -96,19 +108,19 @@ For the complete argument reference, see [CI integration argument reference]({{s | `CF_JIRA_MESSAGE` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira issue IDs matching the string to associate with the image. | Required | | `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | -For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/integrations/ci-integrations/#connect-a-third-party-ci-platformtool-to-codefresh/). +For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh GitOps]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/#connect-a-third-party-ci-platformtool-to-codefresh/). -### Templatization examples for CF arguments +## Templatization examples for CF arguments -Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the Codefresh Classic integration pipeline is triggered. You can templatize the values of these arguments to ensure that the required information is included in the reported image. +Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the Codefresh integration pipeline is triggered. You can templatize the values of these arguments to ensure that the required information is included in the reported image. -Codefresh Classic offers [system variables](https://codefresh.io/docs/docs/codefresh-yaml/variables/#system-provided-variables) you can use to templatize argument values. +Codefresh pipelines have [system variables]({{site.baseurl}}/docs/pipelines/variables/#system-provided-variables) you can use to templatize argument values. {::nomarkdown}
      {:/} -#### CF_IMAGE examples +### CF_IMAGE examples **Example: Report full repo and branch information** This example illustrates how to define the value for `CF_IMAGE` to report the repo owner, name, and branch, with the Git hash. @@ -146,7 +158,7 @@ where:
      {:/} -#### CF_GIT_BRANCH examples +### CF_GIT_BRANCH examples **Example: Report Git branch or tag with committer and commit message** @@ -195,14 +207,14 @@ Value:
      {:/} -#### CF_JIRA_MESSAGE examples +### CF_JIRA_MESSAGE examples The Jira message represents an existing Jira issue, and must be a literal string. Value: `CR-1246` -### Codefresh Classic integration logs -View and analyze logs for Codefresh Classic workflows through the Logs tab. When a Codefresh Classic pipeline is run, it is added to the Logs tab. +## Codefresh pipeline integration logs +View and analyze logs for Codefresh pipelines through the Logs tab. When a Codefresh pipeline is run, it is added to the Logs tab. You can: * Filter by status or by date range to view a subset of actions * Navigate to the build file in Codefresh Classic, and view the Codefresh report image step @@ -216,7 +228,7 @@ caption="Codefresh Classic: Logs tab" max-width="50%" %} -**Build in Codefresh Classic** +**Build in Codefresh** The Run column includes the link to the pipeline in Codefresh Classic. @@ -240,8 +252,8 @@ caption="Logs for Codefresh report image step in Codefresh Classic build" max-width="50%" %} -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) \ No newline at end of file +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[Container registry GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) \ No newline at end of file diff --git a/_docs/integrations/ci-integrations/github-actions.md b/_docs/gitops-integrations/ci-integrations/github-actions.md similarity index 91% rename from _docs/integrations/ci-integrations/github-actions.md rename to _docs/gitops-integrations/ci-integrations/github-actions.md index fa6dd1c89..2d6246cae 100644 --- a/_docs/integrations/ci-integrations/github-actions.md +++ b/_docs/gitops-integrations/ci-integrations/github-actions.md @@ -1,13 +1,13 @@ --- title: "GitHub Actions" description: "" -group: integrations +group: gitops-integrations sub_group: ci-integrations toc: true --- -Use Codefresh Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. -GitHub Actions is one of the third-party CI solutions that you can connect to Codefresh for deployment with image reporting and enrichment. +Use Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. +GitHub Actions is one of the third-party CI solutions that you can connect to Hosted GitOps for deployment with image reporting and enrichment. Connecting a GitHub Action, adds the CI information to images which are displayed in the Images dashboard, as in the example below. @@ -21,10 +21,10 @@ GitHub Actions is one of the third-party CI solutions that you can connect to Co max-width="70%" %} -For information on how to use the image reporting action in your GitHub Action pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). +For information on how to use the image reporting action in your GitHub Action pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/). 
-### Example of GitHub Actions pipeline with Codefresh report image action +## Example of GitHub Actions pipeline with Codefresh report image action Here is an example pipeline that uses GitHub Actions to build a container image, and the Codefresh action to enrich and report the resulting image to Codefresh. @@ -100,7 +100,7 @@ jobs: {% endraw %} {% endhighlight yaml %} -### GitHub Action-Codefresh integration arguments +## GitHub Action-GitOps integration settings The table describes the arguments required to connect a GitHub Action to Codefresh. @@ -111,10 +111,10 @@ The table describes the arguments required to connect a GitHub Action to Codefre | `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | | `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | | `CF_API_KEY` | The API key to authenticate the GitHub Actions user to Codefresh. Generate the key for the GitHub Action. {::nomarkdown}
      Enter this token in GitHub Actions as a secret with the name CF_API_KEY. You can then reference it in all GitHub pipelines as you would any other secret.{:/}| Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. {::nomarkdown}
      • For a GitHub Container registry, select GHCR_GITHUB_TOKEN_AUTHENTICATION even if you have not created an integration in Codefresh.
        Codefresh retrieves and provides the explicit credentials for the container registry on generating the integration manifest.
      • To create a container registry integration if you don't have one, click Create Container Registry Integration, and then configure the settings.
        See Container registry integrations.
      {:/} | Optional | +| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. {::nomarkdown}
      • For a GitHub Container registry, select GHCR_GITHUB_TOKEN_AUTHENTICATION even if you have not created an integration in Codefresh.
        Codefresh retrieves and provides the explicit credentials for the container registry on generating the integration manifest.
      • To create a container registry integration if you don't have one, click Create Container Registry Integration, and then configure the settings.
        See Container registry integrations.
      {:/} | Optional | | `CF_GIT_REPO` | The Git repository with the configuration and code used to build the image. If not defined, Codefresh retrieves it from the repo defined for the GitHub Action. | Required | | `CF_JIRA_INTEGRATION` | Deprecated from version 0.0.565. Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. | _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | +| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/jira/). | Optional | | `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | | `CF_WORKFLOW_NAME` | The name assigned to the workflow that builds the image. When defined, the name is displayed in the Codefresh platform. Example, `Staging step` | Optional | | `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. | Required | @@ -124,8 +124,9 @@ The table describes the arguments required to connect a GitHub Action to Codefre | `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | -For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/integrations/ci-integrations/#connect-a-third-party-ci-platformtool-to-codefresh). -### Templatization examples for CF arguments +For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/#connect-a-third-party-ci-platformtool-to-codefresh). + +## Templatization examples for CF arguments Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the GitHub Actions pipeline is triggered. You can templatize the values of these arguments to ensure that the required information is included in the reported image. @@ -135,7 +136,7 @@ See GitHub Actions [environment variables](https://docs.github.com/en/actions/le
      {:/} -#### CF_IMAGE +### CF_IMAGE **Example: Report full repo and branch information** This example illustrates how to define the value for `CF_IMAGE` to report the repo owner, name, and short branch, with the Git hash. @@ -176,7 +177,7 @@ where:
      {:/} -#### CF_GIT_BRANCH +### CF_GIT_BRANCH **Example: Report fully-formed reference of the branch or tag** This example illustrates how to define the value for `CF_GIT_BRANCH` to report the fully-formed reference of the branch or tag that triggered the workflow run. @@ -203,13 +204,13 @@ where:
      {:/} -#### CF_JIRA_MESSAGE +### CF_JIRA_MESSAGE The Jira message represents an existing Jira issue, and must be a literal string. Value: `CR-1246` -### GitHub Action logs +## GitHub Action logs View and analyze logs for GitHub Action workflows through the Logs tab. When a GitHub Action is run, it is added to the Logs tab. You can: * Filter by status or by date range to view a subset of actions @@ -249,10 +250,10 @@ max-width="50%" %} -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[Container registry GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/ci-integrations/jenkins.md b/_docs/gitops-integrations/ci-integrations/jenkins.md similarity index 94% rename from _docs/integrations/ci-integrations/jenkins.md rename to _docs/gitops-integrations/ci-integrations/jenkins.md index ddfa1eba4..842a47a17 100644 --- a/_docs/integrations/ci-integrations/jenkins.md +++ b/_docs/gitops-integrations/ci-integrations/jenkins.md @@ -1,7 +1,7 @@ --- -title: "Jenkins" +title: "Jenkins GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: ci-integrations toc: true --- @@ -21,9 +21,9 @@ toc: true %} -For information on how to use the image reporting action in your Jenkins pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). +For information on how to use the image reporting action in your Jenkins pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/). -### Example of Jenkins pipeline with report image step +## Example of Jenkins pipeline with report image step {% highlight yaml %} {% raw %} @@ -110,7 +110,7 @@ pipeline { {% endraw %} {% endhighlight yaml %} -### Jenkins-Codefresh integration arguments +## Jenkins-GitOps integration settings The table describes the arguments to connect Codefresh Classic to Codefresh. {: .table .table-bordered .table-hover} @@ -119,9 +119,9 @@ The table describes the arguments to connect Codefresh Classic to Codefresh. | `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | | `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | | `CF_API_KEY` | The API key to authenticate the Codefresh Classic user to Codefresh. Generate the key for the integration. | Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. To create a container registry integration if you don't have one, click **Create Container Registry Integration**, and then configure the settings. See [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). 
| Optional | +| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. To create a container registry integration if you don't have one, click **Create Container Registry Integration**, and then configure the settings. See [Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). | Optional | | `CF_JIRA_INTEGRATION` | Deprecated from version 0.0.565. Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. | _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | +| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/jira/). | Optional | | `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | | `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. | Required | | `CF_GIT_REPO` | The Git repository with the configuration and code used to build the image. | Required | @@ -136,9 +136,9 @@ The table describes the arguments to connect Codefresh Classic to Codefresh. | `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | -For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/integrations/ci-integrations/#connect-a-third-party-ci-platform-tool-to-codefresh). +For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/#connect-a-third-party-ci-platform-tool-to-codefresh). -### Templatization examples for CF arguments +## Templatization examples for CF arguments Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the Jenkins pipeline is triggered. You can templatize the values of these arguments in the pipeline to ensure that the required information is included in the reported image. @@ -148,7 +148,7 @@ Jenkins offers a Git plugin with [environment variables](https://plugins.jenkins
      {:/} -#### CF_IMAGE +### CF_IMAGE **Example: Report repo, branch with Git hash** This example illustrates how to define the value for `CF_IMAGE` to report Git repo, branch, committer, and Git hash information. @@ -188,7 +188,7 @@ This example illustrates how to define the value for `CF_IMAGE` value to report
      {:/} -#### CF_GIT_BRANCH +### CF_GIT_BRANCH **Example: Report the fully-formed Git branch** This example illustrates how to define the value for `CF_GIT_BRANCH` value to report the fully-formed Git branch. @@ -215,13 +215,13 @@ This example illustrates how to define the value for `CF_GIT_BRANCH` value to re
      {:/} -#### CF_JIRA_MESSAGE +### CF_JIRA_MESSAGE The Jira message represents an existing Jira issue, and must be a literal string. Value: `CR-1246` -### Jenkins integration logs +## Jenkins integration logs View and analyze logs for Jenkins through the Logs tab. When a Jenkins pipeline is run, it is added to the Logs tab. You can: * Filter by status or by date range to view a subset of actions @@ -243,8 +243,8 @@ caption="Logs for Codefresh report image step in Jenkins build" max-width="50%" %} -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) \ No newline at end of file +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[Container registry GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/container-registries.md b/_docs/gitops-integrations/container-registries.md similarity index 69% rename from _docs/integrations/container-registries.md rename to _docs/gitops-integrations/container-registries.md index 2aef4ac1e..69a7a87bd 100644 --- a/_docs/integrations/container-registries.md +++ b/_docs/gitops-integrations/container-registries.md @@ -1,13 +1,13 @@ --- -title: "Container registry integrations" +title: "GitOps container registry integrations" description: "" -group: integrations +group: gitops-integrations toc: true --- Codefresh can integrate with popular container registries such as Docker Hub, JFrog Artifactory, and more. -Adding a container registry integration in Codefresh allows you to reference the integration in third-party CI platforms/tools such as GitHub Actions and Codefresh Classic by the name of the registry integration, instead of explicit credentials. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). +Adding a container registry integration in Codefresh allows you to reference the integration in third-party CI platforms/tools such as GitHub Actions and Codefresh Classic by the name of the registry integration, instead of explicit credentials. See [Image enrichment with integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/). You add a container registry integration in Codefresh by: * Defining the integration name @@ -21,29 +21,29 @@ You can add more than one integration for the same registry. Once added, Codefre -### Configure container registry integrations in Codefresh -Configure the settings for a container registry integration in Codefresh. +## Configure container registry integrations for GitOps in Codefresh +Configure the settings for a GitOps container registry integration in Codefresh. -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. +1. 
In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**GitOps Integrations**](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. 1. Filter by **Container Registry**, select the container registry, and click **Configure**. 1. If you already have integrations, click **Add**. 1. Define the arguments for the container registry: - [Amazon ECR]({{site.baseurl}}/docs/integrations/container-registries/amazon-ecr/) - [Docker Hub]({{site.baseurl}}/docs/integrations/container-registries/dockerhub/) - [GitHub Container Registry]({{site.baseurl}}/docs/integrations/container-registries/github-cr/) - [JFrog Artifactory]({{site.baseurl}}/docs/integrations/container-registries/jfrog/) - [Quay]({{site.baseurl}}/docs/integrations/container-registries/quay/) + [Amazon ECR]({{site.baseurl}}/docs/gitops-integrations/container-registries/amazon-ecr/) + [Docker Hub]({{site.baseurl}}/docs/gitops-integrations/container-registries/dockerhub/) + [GitHub Container Registry]({{site.baseurl}}/docs/gitops-integrations/container-registries/github-cr/) + [JFrog Artifactory]({{site.baseurl}}/docs/gitops-integrations/container-registries/jfrog/) + [Quay]({{site.baseurl}}/docs/gitops-integrations/container-registries/quay/) 1. To test the connection to the container registry before committing the changes, click **Test Connection**. 1. To confirm, click **Commit**. It may take a few moments for the new integration to be synced to the cluster before it appears in the list. -### Integration resource in shared configuration repo +## Integration resource in shared configuration repo The integration resource for the container registry is created in the Git repository with the shared configuration, within `resources`. The exact location depends on whether the integration is shared with all or specific runtimes: * All runtimes: Created in `resources/all-runtimes-all-clusters/` * Selected runtimes: Created in `resources/runtimes//` -### View container registry integrations +## View container registry integrations for GitOps Selecting a container registry integration displays the existing integrations for that registry in Codefresh. The example below shows integrations for JFrog Artifactory. @@ -61,12 +61,12 @@ Every container registry integration displays the following information: * Runtime or runtimes it is shared with * Sync status -### Edit/delete container registry integrations +### Edit/delete container registry integrations for GitOps If you have existing integrations, you can change the connection details, or delete an integration. >Deleting an integration deletes the integration resource from the shared configuration Git repo, its secrets, the CI workflows that use it. -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**GitOps Integrations**](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. 1. Filter by **Container Registry**, and select the specific container registry integration. 1. In the row with the integration to edit or delete, click the three dots and select **Edit** or **Delete**. 1. To edit, update the **Username** and **Password** fields, and click **Test Connection** to verify the account credentials. @@ -83,8 +83,8 @@ use it. 
%} ### Related articles -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) [Images]({{site.baseurl}}/docs/deployment/images/) [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/) diff --git a/_docs/integrations/container-registries/amazon-ecr.md b/_docs/gitops-integrations/container-registries/amazon-ecr.md similarity index 73% rename from _docs/integrations/container-registries/amazon-ecr.md rename to _docs/gitops-integrations/container-registries/amazon-ecr.md index 17aee8611..37d4baba2 100644 --- a/_docs/integrations/container-registries/amazon-ecr.md +++ b/_docs/gitops-integrations/container-registries/amazon-ecr.md @@ -1,17 +1,17 @@ --- -title: "Amazon ECR" +title: "Amazon ECR GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: container-registries toc: true --- Codefresh has native support for interacting with Amazon ECR (Elastic Container Registry), to push, pull, and deploy images. -For information on adding an Amazon ECR integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). +For information on adding an Amazon ECR integration for GitOps in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). ->Amazon ECR integration is supported only in hybrid runtimes. +>Amazon ECR integration is supported only for Hybrid GitOps. -### Prerequisites +## Prerequisites Before you configure settings in Codefresh to integrate Amazon ECR: * [Create an IAM (Identity and Access Management) role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html){:target="\_blank"} @@ -29,8 +29,8 @@ For example: ``` For detailed information, see [How Amazon Elastic Container Registry Works with IAM](https://docs.aws.amazon.com/AmazonECR/latest/userguide/security_iam_service-with-iam.html){:target="\_blank"} and the [AWS security blog](https://aws.amazon.com/blogs/security/how-to-use-trust-policies-with-iam-roles/){:target="\_blank"}. -### Amazon ECR integration settings in Codefresh -The table describes the arguments required to integrate Amazon ECR in Codefresh. +## Amazon ECR-GitOps integration settings in Codefresh +The table describes the arguments required for GitOps integrations with Amazon ECR in Codefresh. {: .table .table-bordered .table-hover} | Setting | Description | @@ -52,10 +52,11 @@ The table describes the arguments required to integrate Amazon ECR in Codefresh max-width="50%" %} -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). 
+For how-to instructions, see [Configure container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#editdelete-container-registry-integrations). -### Related articles + +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking]({{site.baseurl}}/docs/integrations/issue-tracking/) \ No newline at end of file +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) \ No newline at end of file diff --git a/_docs/integrations/container-registries/dockerhub.md b/_docs/gitops-integrations/container-registries/dockerhub.md similarity index 74% rename from _docs/integrations/container-registries/dockerhub.md rename to _docs/gitops-integrations/container-registries/dockerhub.md index d74025c95..e6ee7a7eb 100644 --- a/_docs/integrations/container-registries/dockerhub.md +++ b/_docs/gitops-integrations/container-registries/dockerhub.md @@ -1,23 +1,23 @@ --- -title: "Docker Hub" +title: "Docker Hub GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: container-registries toc: true --- Codefresh has native support for interacting with Docker Hub registries, to push, pull, and deploy images. -For information on adding a Docker Hub integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). +For information on adding a Docker Hub integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). -### Prerequisites +## Prerequisites Before you configure settings in Codefresh to integrate Docker Hub registry, do the following: * [Create an account or sign in to your account at Docker Hub](https://hub.docker.com/signup){:target="\_blank"} * (Optional) [Enable 2FA (Two-Factor Authentication)](https://docs.docker.com/docker-hub/2fa/){:target="\_blank"} * [Create a personal account token](https://docs.docker.com/docker-hub/access-tokens/){:target="\_blank"} -### Docker Hub integration settings in Codefresh -The table describes the arguments required to integrate Docker Hub to Codefresh. +## Docker Hub-GitOps integration settings in Codefresh +The table describes the arguments required for Docker Hub GitOps integration in Codefresh. {: .table .table-bordered .table-hover} | Setting | Description | @@ -39,11 +39,11 @@ The table describes the arguments required to integrate Docker Hub to Codefresh. max-width="50%" %} -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). 
+For how-to instructions, see [Configure container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#editdelete-container-registry-integrations). -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/container-registries/github-cr.md b/_docs/gitops-integrations/container-registries/github-cr.md similarity index 75% rename from _docs/integrations/container-registries/github-cr.md rename to _docs/gitops-integrations/container-registries/github-cr.md index f8aa77a79..48baa862c 100644 --- a/_docs/integrations/container-registries/github-cr.md +++ b/_docs/gitops-integrations/container-registries/github-cr.md @@ -1,15 +1,15 @@ --- -title: "GitHub Container Registry (GHCR)" +title: "GitHub Container Registry (GHCR) GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: container-registries toc: true --- The GitHub Container registry allows you to host and manage your Docker container images in your personal or organisation account on GitHub. One of the benefits is that permissions can be defined for the Docker image independent from any repository. Thus, your repository could be private and your Docker image public. -For information on adding a GitHub Container registry integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). +For information on adding a GitHub Container registry integration in Codefresh, see [Container registry GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). -### Prerequisites +## Prerequisites Before you configure settings in Codefresh to integrate GitHub container registry: * Make sure you have a personal access token with the correct scopes or create one. You need at least the following scopes: @@ -21,7 +21,7 @@ Before you configure settings in Codefresh to integrate GitHub container registr For detailed information, see the [Authenticating to the Container registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry){:target="\_blank"}. 
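+
+For example, you can sanity-check the token locally before adding it to Codefresh by logging in to `ghcr.io` with it. This is a quick optional check, assuming the token is exported as `CR_PAT` and `USERNAME` is your GitHub username:
+
+```shell
+# Log in to GitHub Container Registry with the personal access token
+echo $CR_PAT | docker login ghcr.io -u USERNAME --password-stdin
+```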
-### GitHub Container registry (GHCR) integration settings in Codefresh +### GitHub Container registry (GHCR)-GitOps integration settings in Codefresh {: .table .table-bordered .table-hover} | Setting | Description | @@ -44,10 +44,10 @@ Before you configure settings in Codefresh to integrate GitHub container registr max-width="50%" %} -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). +For how-to instructions, see [Configure container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#editdelete-container-registry-integrations). -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking]({{site.baseurl}}/docs/integrations/issue-tracking/) +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/container-registries/jfrog.md b/_docs/gitops-integrations/container-registries/jfrog.md similarity index 71% rename from _docs/integrations/container-registries/jfrog.md rename to _docs/gitops-integrations/container-registries/jfrog.md index ccb22dd0d..3aa108e7f 100644 --- a/_docs/integrations/container-registries/jfrog.md +++ b/_docs/gitops-integrations/container-registries/jfrog.md @@ -1,17 +1,17 @@ --- -title: "JFrog Artifactory" +title: "JFrog Artifactory GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: container-registries toc: true --- Codefresh has native support for interacting with JFrog Artifactory. -For information on adding a JFrog Artifactory integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). +For information on adding a JFrog Artifactory integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/). -### JFrog Artifactory integration settings in Codefresh +## JFrog Artifactory-GitOps integration settings in Codefresh {: .table .table-bordered .table-hover} | Setting | Description | @@ -34,10 +34,10 @@ For information on adding a JFrog Artifactory integration in Codefresh, see [Con max-width="50%" %} -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). 
+For how-to instructions, see [Configure container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#editdelete-container-registry-integrations). -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking]({{site.baseurl}}/docs/integrations/issue-tracking/) +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/container-registries/quay.md b/_docs/gitops-integrations/container-registries/quay.md similarity index 71% rename from _docs/integrations/container-registries/quay.md rename to _docs/gitops-integrations/container-registries/quay.md index dfe9cf817..80d50d5f0 100644 --- a/_docs/integrations/container-registries/quay.md +++ b/_docs/gitops-integrations/container-registries/quay.md @@ -1,23 +1,22 @@ --- -title: "Quay" +title: "Quay GitOps integration" description: "" -group: integrations +group: gitops-integrations sub_group: container-registries toc: true --- Codefresh has native support for interacting with Quay registries, from where you can push, pull, and deploy images. -Adding a Quay integration allows you to reference the integration in external CI tools such as GitHub Actions by the name of the integration account, instead of adding explicit credentials. See [Image enrichment overview]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). +Adding a Quay integration allows you to reference the integration in external CI tools such as GitHub Actions by the name of the integration account, instead of adding explicit credentials. See [Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) and [CI integrations for GitOps]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/). -### Prerequisites +## Prerequisites 1. [Create a Redhat/Quay account at Quay](https://quay.io/){:target="\_blank"}. 1. Optional. For Codefresh integration, [create a robot account](https://docs.quay.io/glossary/robot-accounts.html){:target="\_blank"}. -### Quay integration settings in Codefresh +## Quay-GitOps integration settings in Codefresh -The table describes the arguments required to integrate Quay in Codefresh. {: .table .table-bordered .table-hover} | Setting | Description | @@ -37,15 +36,15 @@ The table describes the arguments required to integrate Quay in Codefresh. max-width="50%" %} -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). 
+For how-to instructions, see [Configure container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations for GitOps in Codefresh]({{site.baseurl}}/docs/gitops-integrations/container-registries/#editdelete-container-registry-integrations). Make sure you have the: * Quay domain username * Quay domain-encrypted password or that of the robot account -### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/) diff --git a/_docs/integrations/image-enrichment-overview.md b/_docs/gitops-integrations/image-enrichment-overview.md similarity index 65% rename from _docs/integrations/image-enrichment-overview.md rename to _docs/gitops-integrations/image-enrichment-overview.md index cf2d9ca29..a19ec3239 100644 --- a/_docs/integrations/image-enrichment-overview.md +++ b/_docs/gitops-integrations/image-enrichment-overview.md @@ -1,7 +1,7 @@ --- -title: "Image enrichment with integrations" +title: "GitOps image enrichment with integrations" description: "" -group: integration +group: gitops-integrations toc: true --- @@ -16,38 +16,38 @@ Codefresh has new report images templates, optimized to work with third-party CI -### CI integration flow for image enrichment +## CI integration flow for image enrichment Integrate Codefresh with your CI platform/tool account with a unique name per integration account. -#### 1. Add/configure integration +### 1. Add/configure integration Add/configure the integration account for the third-party tools. You can set up multiple integration accounts for the same tool. When you add an integration, Codefresh creates a Sealed Secret with the integration credentials, and a ConfigMap that references the secret. See: * Issue tracking - [JIRA]({{site.baseurl}}/docs/integrations/issue-tracking/jira/) + [JIRA]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/jira/) * Container registries - [Amazon ECR]({{site.baseurl}}/docs/integrations/container-registries/amazon-ecr/) - [DockerHub]({{site.baseurl}}/docs/integrations/container-registries/dockerhub/) - [JFrog Artifactory]({{site.baseurl}}/docs/integrations/container-registries/jfrog/) - [Quay]({{site.baseurl}}/docs/integrations/container-registries/quay/) + [Amazon ECR]({{site.baseurl}}/docs/gitops-integrations/container-registries/amazon-ecr/) + [DockerHub]({{site.baseurl}}/docs/gitops-integrations/container-registries/dockerhub/) + [JFrog Artifactory]({{site.baseurl}}/docs/gitops-integrations/container-registries/jfrog/) + [Quay]({{site.baseurl}}/docs/gitops-integrations/container-registries/quay/) We are working on supporting integrations for more tools. Stay tuned for the release announcements. For image enrichment with a tool that is as yet unsupported, you must define the explicit credentials. -#### 2. Connect CI platform/tool to Codefresh +### 2. 
Connect CI platform/tool to GitOps -Connect a CI platform/tool to Codefresh with an API token for the runtime cluster, the integration accounts, and image information for enrichment and reporting. +Connect a CI platform/tool to Codefresh GitOps with an API token for the runtime cluster, the integration accounts, and image information for enrichment and reporting. -[Codefresh Classic]({{site.baseurl}}/docs/integrations/ci-integrations/codefresh-classic/) -[GitHub Actions]({{site.baseurl}}/docs/integrations/ci-integrations/github-actions/) -[Jenkins]({{site.baseurl}}/docs/integrations/ci-integrations/jenkins/) +[Codefresh Classic]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/codefresh-classic/) +[GitHub Actions]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/github-actions/) +[Jenkins]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/jenkins/) -#### 3. Add the enrichment step for the CI platform/tool to your GitHub Actions pipeline +### 3. Add the enrichment step for the CI platform/tool to your GitHub Actions pipeline Finally, add the enrichment step to your CI pipeline with the API token and integration information. Codefresh uses the integration name to get the corresponding Sealed Secret to securely access and retrieve the information for image enrichment. @@ -55,7 +55,7 @@ Finally, add the enrichment step to your CI pipeline with the API token and inte [Codefresh Classic Codefresh report image](https://codefresh.io/steps/step/codefresh-report-image){:target="\_blank"}. -#### 4. View enriched image information +### 4. View enriched image information Once deployed, view enriched information in the Codefresh UI: * Go to [Images](https://g.codefresh.io/2.0/images){:target="\_blank"} * Go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard){:target="\_blank"} @@ -69,7 +69,7 @@ View: * Jira issues, status and details for each deployment -### Related articles +## Related articles [Images]({{site.baseurl}}/docs/deployment/images/) -[Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/) +[Applications dashboard]({{site.baseurl}}/docs/gitops-deployment/applications-dashboard/) diff --git a/_docs/integrations/issue-tracking.md b/_docs/gitops-integrations/issue-tracking.md similarity index 72% rename from _docs/integrations/issue-tracking.md rename to _docs/gitops-integrations/issue-tracking.md index 60ea2d5a1..508dce8b4 100644 --- a/_docs/integrations/issue-tracking.md +++ b/_docs/gitops-integrations/issue-tracking.md @@ -1,14 +1,14 @@ --- -title: "Issue tracking integrations" +title: "GitOps issue tracking integrations" description: "" -group: integrations +group: gitops-integrations toc: true --- One of the major highlights of the Codefresh platform is the ability to automatically correlate software features with their deployment (where and when). While the software version of a component is easily identifiable, what is likely more interesting and important is to know which features are included in a release. -Adding an issue-tracking integration in Codefresh allows you to reference the integration in third-party CI platforms/tools such as GitHub Actions and Codefresh Classic by the name of the integration, instead of explicit credentials. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). 
+Adding an issue-tracking integration in Codefresh allows you to reference the integration in third-party CI platforms/tools such as GitHub Actions and Codefresh Classic by the name of the integration, instead of explicit credentials. See [Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) and [CI integrations for GitOps]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/). You add an issue-tracking integration in Codefresh by: * Defining the integration name @@ -17,15 +17,16 @@ You add an issue-tracking integration in Codefresh by: * Committing the changes Once added, Codefresh displays the list of existing integrations with their sync status. You can edit or delete any integration. -### Configure container registry integrations in Codefresh -Configure the settings for a container registry integration in Codefresh. -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. +## Configure issue tracking integrations for GitOps in Codefresh +Configure the settings for an issue tracking integration for GitOps in Codefresh. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**GitOps Integrations**](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. 1. Filter by **Issue Tracking**, select the issue tracking tool to integrate, and click **Configure**. 1. Jira integrations only: For a new Jira integration, from the **Add Integration** dropdown, select the type of integration, as either **Deployment reporting** or **Image enrichment**. 1. If you already have integrations, click **Add**. 1. Define the arguments for the issue tracking tool: - [Jira]({{site.baseurl}}/docs/integrations/issue-tracking/jira/) + [Jira]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/jira/) 1. To confirm, click **Commit**. It may take a few moments for the new integration to be synced to the cluster before it appears in the list. @@ -35,7 +36,7 @@ The exact location depends on whether the integration is shared with all or spec * All runtimes: Created in `resources/all-runtimes-all-clusters/` * Selected runtimes: Created in `resources/runtimes//` -### View issue-tracking integrations +### View issue-tracking integrations for GitOps Selecting an issue tracking tool displays the existing integrations in Codefresh. @@ -44,12 +45,12 @@ Every issue tracking integration displays the following information: * Runtime or runtimes it is shared with * Sync status -### Edit/delete issue-tracking integrations in Codefresh +### Edit/delete issue-tracking integrations for GitOps in Codefresh If you have existing integrations, you can change the credentials, or delete an integration. >Deleting an integration deletes the integration resource from the shared configuration Git repo, its secrets, the CI workflows that use it. -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**GitOps Integrations**](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. 1. Filter by **Issue Tracking**, and select the specific integration. 1. In the row with the integration to edit or delete, click the three dots and select **Edit** or **Delete**. 1. 
To edit, update the **Username** and **Password** fields, and click **Test Connection** to verify the account credentials. @@ -67,6 +68,6 @@ use it. ### Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) +[CI GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Container registry GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/) diff --git a/_docs/integrations/issue-tracking/jira.md b/_docs/gitops-integrations/issue-tracking/jira.md similarity index 81% rename from _docs/integrations/issue-tracking/jira.md rename to _docs/gitops-integrations/issue-tracking/jira.md index fd223281e..08475c334 100644 --- a/_docs/integrations/issue-tracking/jira.md +++ b/_docs/gitops-integrations/issue-tracking/jira.md @@ -1,7 +1,7 @@ --- title: "Jira" description: " " -group: integrations +group: gitops-integrations sub_group: issue-tracking toc: true --- @@ -9,10 +9,10 @@ toc: true Codefresh has native integration for Atlassian Jira, to enrich images with information from Jira. Codefresh can monitor a feature all the way from the ticket creation phase, up to when it is implemented and deployed to an environment. -For information on adding a Jira integration in Codefresh, see [Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/). +For information on adding a Jira integration in Codefresh, see [Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/). -### Prerequisites +## Prerequisites 1. Get your Jira instance credentials by following the [Atlassian documentation](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/){:target="\_blank"}. 1. Note down the following as you will need them to complete the integration with Codefresh: @@ -21,9 +21,8 @@ For information on adding a Jira integration in Codefresh, see [Issue-tracking i * Jira password/token created for this user -### Jira integration settings in Codefresh +## Jira-GitOps integration settings in Codefresh -The table describes the arguments required to integrate Jira in Codefresh. {: .table .table-bordered .table-hover} | Setting | Description | @@ -45,14 +44,14 @@ The table describes the arguments required to integrate Jira in Codefresh. max-width="60%" %} -For information on adding a Jira integration in Codefresh, see [Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/). +For information on adding a Jira integration in Codefresh, see [Issue-tracking GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/issue-tracking/). -### Using Jira integration in pipelines +## Using Jira integration in pipelines For pipelines based on GitHub Actions, configure the Jira integration in Codefresh, and then connect your GitHub Action to Codefresh, referencing the Jira integration by name. Codefresh uses the Secret Key stored in the runtime cluster to securely access Jira and retrieve the information. 
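+
+For illustration only, a job step in a GitHub Actions workflow that reports an image and references the Jira integration by name might look like the sketch below. The action reference, image name, and the exact set of `CF_*` variables are placeholders, not definitive syntax — use the report-image step and variable names documented for your CI integration (for example, `CF_JIRA_MESSAGE` and `CF_GIT_BRANCH` in the CI integration articles):
+
+```yaml
+# Illustrative sketch only: action reference, image name, and variable names are placeholders
+- name: Report image to Codefresh
+  uses: codefresh-io/codefresh-report-image@latest   # placeholder action reference
+  env:
+    CF_API_KEY: <codefresh-api-token>                # store and pass this as a GitHub secret
+    CF_IMAGE: my-registry/my-app:1.0.0               # image to enrich and report
+    CF_JIRA_INTEGRATION: my-jira-integration         # name of the Jira integration in Codefresh
+    CF_JIRA_MESSAGE: CR-1246                         # Jira issue to associate with the image
+    CF_GIT_BRANCH: main                              # fully-formed Git branch
+```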
-### Related articles +## Related articles [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) +[Image enrichment with GitOps integrations]({{site.baseurl}}/docs/gitops-integrations/image-enrichment-overview/) +[CI integrations]({{site.baseurl}}/docs/gitops-integrations/ci-integrations/) +[Container registry integrations]({{site.baseurl}}/docs/gitops-integrations/container-registries/) diff --git a/_docs/incubation/intro-hosted-runtime.md b/_docs/incubation/intro-hosted-runtime.md deleted file mode 100644 index c2a66b694..000000000 --- a/_docs/incubation/intro-hosted-runtime.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: "Hosted GitOps" -description: "" -group: incubation -toc: true ---- - - -Codefresh has enhanced our solution offering with Hosted GitOps, the SaaS version of Codefresh. - -What do you get with Hosted GitOps? -In a nutshell, a hosted and managed version of Argo CD. From application analytics, to application creation, rollout, and deployment, you get the best of both worlds: Argo CD with unique features and functionality from Codefresh to help achieve your CD goals. -What it also means is easy set up and zero maintenance overhead. - -Read on for more details. And check out our [blog](https://codefresh.io/blog/codefresh-upends-continuous-delivery-with-hosted-gitops-platform-featuring-dora-dashboards-and-first-class-integrations-for-ci/). - -### Hosted runtimes - -Setting up your hosted environment takes just a few clicks. All you need is a Codefresh account, a Git account, and a Kubernetes cluster to which to deploy your applications. -Codefresh guides you through the simple three-step process of provisioning your hosted runtime. From that point, Codefresh handles administration and maintenance of the hosted runtime, including version and security updates. - -See [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -{% include - image.html - lightbox="true" - file="/images/runtime/intro-hosted-hosted-initial-view.png" - url="/images/runtime/intro-hosted-hosted-initial-view.png" - alt="Hosted runtime setup" - caption="Hosted runtime setup" - max-width="80%" -%} - -### Global deployment analytics - -The Home dashboard presents enterprise-wide deployment highlights, making it a useful management tool. - -Get insights into important KPIs and deployments, across runtimes and clusters, all in the same location. View status of runtimes and managed clusters, deployments, failed deployments with rollbacks, most active applications. Use filters to narrow the scope to focus on anything specific. - -{% include - image.html - lightbox="true" - file="/images/incubation/home-dashboard.png" - url="/images/incubation/home-dashboard.png" - alt="Global deployment analytics" - caption="Global deployment analytics" - max-width="80%" -%} - -### Application analytics and analysis - -The Applications dashboard displays applications across runtimes and clusters, from which you can select and analyze individual applications. 
Individual application information is grouped by current and historical deployments, enriched with Argo, Jira, and Git details, including rollout visualizations for ongoing deployments (Timeline tab), and an interactive tree view of application resources (Current State tab). - -See [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/). - -{% include - image.html - lightbox="true" - file="/images/applications/app-dashboard-main-view.png" - url="/images/applications/app-dashboard-main-view.png" - alt="Applications dashboard" - caption="Applications dashboard" - max-width="80%" -%} - -### DORA metrics - -DORA metrics has become integral to enterprises wanting to quantify DevOps performance, and Codefresh has out-of-the-box support for it. - -Apart from the metrics themselves, the DORA dashboard in Codefresh has several features such as the Totals bar with key metrics, filters that allow you to pinpoint just which applications or runtimes are contributing to problematic metrics, and the ability to set a different view granularity for each DORA metric. - -See [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/). - -{% include - image.html - lightbox="true" - file="/images/incubation/intro-dora-metrics.png" - url="/images/incubation/intro-dora-metrics.png" - alt="DORA metrics" - caption="DORA metrics" - max-width="60%" -%} - -### Application management - -Manage the application lifecycle in the Codefresh UI, from creating, editing, and deleting them. - -Define all application settings in a single location through the intuitive Form mode or directly in YAML, and commit all changes to Git. -For easy access, the configuration settings are available for editing in the Applications dashboard. - -See [Applications]({{site.baseurl}}/docs/deployment/create-application/). - -{% include - image.html - lightbox="true" - file="/images/applications/add-app-general-settings.png" - url="/images/applications/add-app-general-settings.png" - alt="Application creation in Codefresh" - caption="Application creation in Codefresh" - max-width="60%" -%} - -### Third-party CI integrations - -If you have your own tools for CI pipelines and workflows, Hosted GitOps gives you the option to connect them to Codefresh and enrich deployment information with our new report image template. For example, you can add the report image step in your GitHub Actions pipeline and reference the different integrations for Codefresh to retrieve and enrich the image information. - -* Git PRs (Pull Requests), Commits, and Committer information directly from the code repositories -* Jira ticket information for correlation with deployed features -* Docker Hub or Quay for image information - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -{% include - image.html - lightbox="true" - file="/images/incubation/github-action-int-settings.png" - url="/images/incubation/github-action-int-settings.png" - alt="Image enrichment with GitHub Actions integration" - caption="Image enrichment with GitHub Actions integration" - max-width="60%" -%} - -### Hosted vs. hybrid environments - -The table below highlights the main differences between hosted and hybrid environments. 
- -{: .table .table-bordered .table-hover} -| Functionality |Feature | Hosted | Hybrid | -| -------------- | --------------|--------------- | --------------- | -| Runtime | Installation | Provisioned by Codefresh | Provisioned by customer | -| | Runtime cluster |Managed by Codefresh | Managed by customer | -| | Number per account | Only one runtime | Multiple runtimes | -| | Upgrade | Managed by Codefresh | Managed by customer | -| | External cluster | Managed by customer | Managed by customer | -| | Uninstall | Managed by customer | Managed by customer | -| Argo CD | | Codefresh cluster | Customer cluster | -| CI Ops | Delivery Pipelines |Not supported | Supported | -| |Workflows | Not supported | Supported | -| |Workflow Templates | Not supported | Supported | -| CD Ops |Applications | Supported | Supported | -| |Image enrichment | Supported | Supported | -| | Rollouts | Supported | Supported | -|Integrations | | Supported | Supported | -|Dashboards |Home Analytics | Hosted runtime and deployments|Runtimes, deployments, Delivery Pipelines | -| |DORA metrics | Supported |Supported | -| |Applications | Supported |Supported | diff --git a/_docs/installation/codefresh-on-prem-upgrade.md b/_docs/installation/codefresh-on-prem-upgrade.md new file mode 100644 index 000000000..335b30755 --- /dev/null +++ b/_docs/installation/codefresh-on-prem-upgrade.md @@ -0,0 +1,575 @@ +--- +title: "Codefresh On-Premises Upgrade" +description: "Use the Kubernetes Codefresh Installer to upgrade your Codefresh On-Premises platform " +group: installation +redirect_from: + - /docs/enterprise/codefresh-on-prem-upgrade/ +toc: true +--- +Upgrade the Codefresh On-premises platform to the latest version: +* Prepare for the upgrade: _Before_ the upgrade, based on the version you are upgrading to, complete the required tasks +* Upgrade On-premises +* Complete post-upgrade configuration: If needed, also based on the version you are upgrading to, complete the required tasks + + +## Upgrade to 1.1.1 +Prepare for the upgrade to v1.1.1 by performing the tasks listed below. + +### Maintain backward compatibility for infrastructure services +If you have Codefresh version 1.0.202 or lower installed, and are upgrading to v1.1.1, to retain the existing images for the services listed below, update the `config.yaml` for `kcfi`. + +* `cf-mongodb` +* `cf-redis` +* `cf-rabbitmq` +* `cf-postgresql` +* `cf-nats` +* `cf-consul` + +> In the `config.yaml`, as in the example below, if needed, replace the `bitnami` prefix with that of your private repo. + +```yaml +... + +global: + ### Codefresh App domain name. appUrl is manadatory parameter + appUrl: onprem.mydomain.com + appProtocol: https + + mongodbImage: bitnami/mongodb:3.6.13-r0 # (default `mongodbImage: bitnami/mongodb:4.2`) + +mongodb: + image: bitnami/mongodb:3.6.13-r0 # (default `image: bitnami/mongodb:4.2`) + podSecurityContext: + enabled: true + runAsUser: 0 + fsGroup: 0 + containerSecurityContext: + enabled: false + +redis: + image: bitnami/redis:3.2.9-r2 # (default `image: bitnami/redis:6.0.16`) + podSecurityContext: + enabled: false + containerSecurityContext: + enabled: false + +postgresql: + imageTag: 9.6.2 # (default `imageTag:13`) + +nats: + imageTag: 0.9.4 # (default `imageTag:2.7`) + +consul: + ImageTag: 1.0.0 # (default `imageTag:1.11`) +... +``` +## Upgrade to 1.2.0 and higher +This major release **deprecates** the following Codefresh managed charts: +* Ingress +* Rabbitmq +* Redis + +See the instructions below for each of the affected charts. 
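+
+Before starting any of the upgrade paths below, it can help to confirm which chart version and release you are currently running. A quick check, assuming the release is installed in the `codefresh` namespace:
+
+```shell
+# Show the currently deployed Codefresh chart version and release name
+helm list -n codefresh
+# Review the currently running pods and their images
+kubectl get pods -n codefresh -o wide
+```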
+
+> Before the upgrade, remove any seed jobs left over from the previous release:
+ `kubectl delete job --namespace ${CF_NAMESPACE} -l release=cf `
+
+> Before the upgrade, remove the PDBs (PodDisruptionBudgets) for Redis and RabbitMQ left over from the previous release:
+ `kubectl delete pdb cf-rabbitmq --namespace ${CF_NAMESPACE}`
      + `kubectl delete pdb cf-redis --namespace ${CF_NAMESPACE}` + +### Update configuration for Ingress chart +From version **1.2.0 and higher**, we have deprecated support for `Codefresh-managed-ingress`. +Kubernetes community public `ingress-nginx` chart replaces `Codefresh-managed-ingress` chart. For more information on the `ingress-nginx`, see [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx). + +> Parameter locations have changed as the ingress chart name was changed from `ingress` to `ingress-nginx`: + **NGINX controller** parameters are now defined under `ingress-nginx` + **Ingress object** parameters are now defined under `ingress` + +You must update `config.yaml`, if you are using: +* External ingress controllers, including ALB (Application Load Balancer) +* Codefresh-managed ingress controller with _custom_ values + +#### Update configuration for external ingress controllers + +For external ingress controllers, including ALB (Application Load Balancer), update the relevant sections in `config.yaml` to align with the new name for the ingress chart: + +* Replace `ingress` with `ingress-nginx` + +*v1.1.1 or lower* +```yaml +ingress: #disables creation of both Nginx controller deployment and Ingress objects + enabled: false +``` + +*v1.2.2 or higher* +```yaml +ingress-nginx: #disables creation of Nginx controller deployment + enabled: false + +ingress: #disables creation of Ingress objects (assuming you've manually created ingress resource before) + enabled: false +``` + +* Replace `annotations` that have been deprecated with `ingressClassName` + +*v1.1.1 or lower* +```yaml +ingress: + annotations: + kubernetes.io/ingress.class: my-non-codefresh-nginx +``` + +*v1.2.2 or higher* +```yaml +ingress-nginx: + enabled: false + +ingress: + ingressClassName: my-non-codefresh-nginx +### `kubernetes.io/ingress.class` annotation is deprecated from kubernetes v1.22+. +# annotations: +# kubernetes.io/ingress.class: my-non-codefresh-nginx +``` + +#### Update configuration for Codefresh-managed ingress with custom values + +If you were running `Codefresh-managed ingress` controller with _custom_ values refer to [values.yaml](https://github.com/kubernetes/ingress-nginx/blob/main/charts/ingress-nginx/values.yaml) from the official repo. If needed, update the `ingress-nginx` section in `config.yaml`. The example below shows the default values (already provided in Codefresh chart) for `ingress-nginx`: + +```yaml +ingress-nginx: + enabled: true + controller: + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx-codefresh" + # -- Name of the ingressClass + name: nginx-codefresh + # -- For backwards compatibility with ingress.class annotation. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx-codefresh + # -- Process IngressClass per name (additionally as per spec.controller). 
+ ingressClassByName: true + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: true + admissionWebhooks: + enabled: false +``` +> New `ingress-nginx` subchart creates a new `cf-ingress-nginx-controller` service (`type: LoadBalancer`) instead of old `cf-ingress-controller` service. So make sure to update DNS record for `global.appUrl` to point to a new external load balancer IP. + You can get external load balancer IP with: + `kubectl get svc cf-ingress-nginx-controller -o jsonpath={.status.loadBalancer.ingress[0].ip` + + +### Update configuration for RabbitMQ chart +From version **1.2.2 and higher**, we have deprecated support for the `Codefresh-managed Rabbitmq` chart. Bitnami public `bitnami/rabbitmq` chart has replaced the `Codefresh-managed rabbitmq`. For more information, see [bitnami/rabbitmq](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq). + +> Configuration updates are not required if you are running an **external** RabbitMQ service. + +> RabbitMQ chart was replaced so as a consequence values structure might be different for some parameters. + For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/rabbitmq/values.yaml) + +**`existingPvc` changed to `existingClaim` and defined under `persistence`** + +*v1.1.1 or lower* +```yaml +rabbitmq: + existingPvc: my-rabbitmq-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +*v1.2.2 or higher* +```yaml +rabbitmq: + volumePermissions: ## Enable init container that changes the owner and group of the persistent volume from existing claim + enabled: true + persistence: + existingClaim: my-rabbitmq-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +**`storageClass` and `size` defined under `persistence`** + +*v1.1.1 or lower* +```yaml +rabbitmq: + storageClass: my-storage-class + storageSize: 32Gi +``` + +*v1.2.2 or higher* +```yaml +rabbitmq: + persistence: + storageClass: my-storage-class + size: 32Gi +``` + +### Update configuration for Redis chart +From version **1.2.2 and higher**, we have deprecated support for the `Codefresh-managed Redis` chart. Bitnami public `bitnami/redis` chart has replaced the `Codefresh-managed Redis` chart. For more information, see [bitnami/redis](https://github.com/bitnami/charts/tree/master/bitnami/redis). + +Redis storage contains **CRON and Registry** typed triggers so you must migrate existing data from the old deployment to the new stateful set. +This is done by backing up the existing data before upgrade, and then restoring the backed up data after upgrade. + +> Configuration updates are not required: + * When running an **external** Redis service. + * If CRON and Registy triggers have not been configured. + +#### Verify existing Redis data for CRON and Registry triggers +Check if you have CRON and Registry triggers configured in Redis. + +* Run `codefresh get triggers` + OR + Directly from the K8s cluster where Codefresh is installed. 
+ +```shell +NAMESPACE=codefresh +REDIS_PASSWORD=$(kubectl get secret --namespace $NAMESPACE cf-redis -o jsonpath="{.data.redis-password}" | base64 --decode) + +kubectl exec -it deploy/cf-redis -- env REDIS_PASSWORD=$REDIS_PASSWORD bash +#once inside cf-redis pod +REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli +info keyspace # list db +select 15 # select db 15 +keys * #show keys +``` + +* If there are results, continue with _Back up existing Redis data_. + +#### Back up existing Redis data +Back up the existing data before the upgrade: + +* Connect to the pod, run `redis-cli`, export AOF data from old `cf-redis-*` pod: + +```shell +NAMESPACE=codefresh +REDIS_PASSWORD=$(kubectl get secret --namespace $NAMESPACE cf-redis -o jsonpath="{.data.redis-password}" | base64 --decode) +REDIS_POD=$(kubectl get pods -l app=cf-redis -o custom-columns=:metadata.name --no-headers=true) +kubectl cp $REDIS_POD:/bitnami/redis/data/appendonly.aof appendonly.aof -c cf-redis +``` + +#### Restore backed-up Redis data +Restore the data after the upgrade: + +* Copy `appendonly.aof` to the new `cf-redis-master-0` pod: + + ```shell + kubectl cp appendonly.aof cf-redis-master-0:/data/appendonly.aof + ```` +* Restart `cf-redis-master-0` and `cf-api` pods: + + ```shell + kubectl delete pod cf-redis-master-0 + + kubectl scale deployment cf-cfapi-base --replicas=0 -n codefresh + kubectl scale deployment cf-cfapi-base --replicas=2 -n codefresh + ``` + +> Redis chart was replaced so as a consequence values structure might be different for some parameters. + For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/redis/values.yaml). + +**`existingPvc` changed to `existingClaim` and defined under `master.persistence`** + +*v1.1.1 or lower* +```yaml +redis: + existingPvc: my-redis-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +*v1.2.2 or higher* +```yaml +redis: + volumePermissions: ## Enable init container that changes the owner and group of the persistent volume from existing claim + enabled: true + master: + persistence: + existingClaim: my-redis-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +**`storageClass` and `size` defined under `master.persistence`** + + +*v1.1.1 or lower* +```yaml +redis: + storageClass: my-storage-class + storageSize: 32Gi +``` + +*v1.2.2 or higher* +```yaml +redis: + master: + persistence: + storageClass: my-storage-class + size: 32Gi +``` + +> If you run the upgrade without redis backup and restore procedure, **Helm Releases Dashboard** page might be empty for a few minutes after the upgrade. + +## Upgrade to 1.3.0 and higher +This major release **deprecates** the following Codefresh managed charts: +* Consul +* Nats + +### Update configuration for Consul +From version **1.3.0 and higher**, we have deprecated the Codefresh-managed `consul` chart, in favor of Bitnami public `bitnami/consul` chart. For more information, see [bitnami/consul](https://github.com/bitnami/charts/tree/master/bitnami/consul). + +Consul storage contains data about **Windows** worker nodes, so if you had any Windows nodes connected to your OnPrem installation, see the following instruction: + +> Use `https:///admin/nodes` to check for any existing Windows nodes. 
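+
+If you are not sure whether your installation holds any Consul data worth preserving, you can inspect the existing StatefulSet before taking the snapshot. A quick check, assuming the default `codefresh` namespace:
+
+```shell
+# Confirm the old Codefresh-managed Consul StatefulSet exists
+kubectl get statefulset cf-consul -n codefresh
+# List the members registered with the Consul server
+kubectl exec -it cf-consul-0 -n codefresh -- consul members
+```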
+
+#### Back up existing consul data
+_Before starting the upgrade_, back up existing data.
+
+> Because `cf-consul` is a StatefulSet with some immutable fields in its spec, and both the old and new charts use the same name, you cannot perform a direct upgrade.
+ A direct upgrade will most likely fail with:
+ `helm.go:84: [debug] cannot patch "cf-consul" with kind StatefulSet: StatefulSet.apps "cf-consul" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', 'updateStrategy' and 'minReadySeconds' are forbidden`
+ After backing up the existing data, you must delete the old StatefulSet.
+
+
+1. Exec into the consul pod and create a snapshot:
+```shell
+kubectl exec -it cf-consul-0 -n codefresh -- consul snapshot save backup.snap
+```
+1. Copy the snapshot locally:
+```shell
+kubectl cp -n codefresh cf-consul-0:backup.snap backup.snap
+```
+1. **Delete the old** `cf-consul` stateful set:
+
+```shell
+kubectl delete statefulset cf-consul -n codefresh
+```
+
+#### Restore backed-up data
+
+After completing the upgrade to the current version, restore the `consul` data that you backed up.
+
+1. Copy the snapshot back to the new pod:
+
+```shell
+kubectl cp -n codefresh backup.snap cf-consul-0:/tmp/backup.snap
+```
+1. Restore the data:
+```shell
+kubectl exec -it cf-consul-0 -n codefresh -- consul snapshot restore /tmp/backup.snap
+```
+> The Consul chart was replaced, and the values structure might be different for some parameters.
+ For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/consul/values.yaml).
+
+
+### Update Nats configuration
+From version **1.3.0 and higher**, we have deprecated the Codefresh-managed `nats` chart in favor of the Bitnami public `bitnami/nats` chart. For more information, see [bitnami/nats](https://github.com/bitnami/charts/tree/master/bitnami/nats).
+
+> Because `cf-nats` is a StatefulSet with some immutable fields in its spec, and both the old and new charts use the same name, you cannot perform a direct upgrade.
+ A direct upgrade will most likely fail with:
+ `helm.go:84: [debug] cannot patch "cf-nats" with kind StatefulSet: StatefulSet.apps "cf-nats" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', 'updateStrategy' and 'minReadySeconds' are forbidden`
+ After backing up existing data, you must delete the old StatefulSet.
+
+
+* **Delete the old** `cf-nats` stateful set.
+
+```shell
+kubectl delete statefulset cf-nats -n codefresh
+```
+
+> The Nats chart was replaced, and the values structure might be different for some parameters.
+ For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/nats/values.yaml).
+
+### Upgrade to 1.3.1 and higher
+
+Chart **v1.3.1** fixes the duplicated env vars `CLUSTER_PROVIDERS_URI` and `CLUSTER_PROVIDERS_PORT` in the `cf-api` deployment:
+```yaml
+W1010 03:03:55.553842 280 warnings.go:70] spec.template.spec.containers[0].env[94].name: duplicate name "CLUSTER_PROVIDERS_URI"
+W1010 03:03:55.553858 280 warnings.go:70] spec.template.spec.containers[0].env[95].name: duplicate name "CLUSTER_PROVIDERS_PORT"
+```
+
+
+> Due to the Helm issue [Removal of duplicate array entry removes completely from Kubernetes](https://github.com/helm/helm/issues/10741), you should run `kcfi deploy` or `helm upgrade` two times consecutively.
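+
+For example, with `kcfi` this means simply invoking the same deploy command twice in a row, using the same `config.yaml` as in the upgrade procedure below:
+
+```shell
+kcfi deploy --debug -c codefresh/config.yaml
+# Run the same command a second time to work around the Helm duplicate-entry issue
+kcfi deploy --debug -c codefresh/config.yaml
+```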
+ + +With chart **v1.3.1** [insecure registy](https://docs.docker.com/registry/insecure/) property has been moved under `builder` section: + +```yaml +builder: + insecureRegistries: + - "myregistrydomain.com:5000" +``` + +## Upgrade the Codefresh Platform with [kcfi](https://github.com/codefresh-io/kcfi) + +1. Locate the `config.yaml` file you used in the initial installation. +1. Change the release number inside it. + ```yaml + metadata: + kind: codefresh + installer: + type: helm + helm: + chart: codefresh + repoUrl: https://chartmuseum.codefresh.io/codefresh + version: 1.2.14 + ``` +1. Perform a dry run and verify that there are no errors: + `kcfi deploy --dry-run --debug -c codefresh/config.yaml` +1. Run the actual upgrade: + `kcfi deploy --debug -c codefresh/config.yaml` +1. Verify that all the pods are are in running state: + `kubectl -n codefresh get pods --watch` +1. Log in to the Codefresh UI, and check the new version. +1. If needed, enable/disable new feature flags. + +## Codefresh with Private Registry + +If you install/upgrade Codefresh on the air-gapped environment (without access to public registries or Codefresh Enterprise registry) you will have to copy the images to your organization container registry. + +**Obtain [image list](https://github.com/codefresh-io/onprem-images/tree/master/releases) for specific release** + +**Push images to private docker registry** + +There are 3 types of images: + +> localhost:5000 is your + +- non-Codefresh like: +``` +bitnami/mongo:4.2 +k8s.gcr.io/ingress-nginx/controller:v1.2.0 +postgres:13 +``` +convert to: +``` +localhost:5000/bitnami/mongodb:4.2 +localhost:5000/ingress-nginx/controller:v1.2.0 +localhost:5000/postgres:13 +``` +- Codefresh public images like: +``` +quay.io/codefresh/dind:20.10.13-1.25.2 +quay.io/codefresh/engine:1.147.8 +quay.io/codefresh/cf-docker-builder:1.1.14 +``` +convert to: +``` +localhost:5000/codefresh/dind:20.10.13-1.25.2 +localhost:5000/codefresh/engine:1.147.8 +localhost:5000/codefresh/cf-docker-builder:1.1.14 +``` +- Codefresh private images like: +``` +gcr.io/codefresh-enterprise/codefresh/cf-api:21.153.6 +gcr.io/codefresh-enterprise/codefresh/cf-ui:14.69.38 +gcr.io/codefresh-enterprise/codefresh/pipeline-manager:3.121.7 +``` +convert to: +``` +localhost:5000/codefresh/cf-api:21.153.6 +localhost:5000/codefresh/cf-ui:14.69.38 +localhost:5000/codefresh/pipeline-manager:3.121.7 +``` +> DELIMITERS are codefresh OR codefresh-io + +- To push images via [kcfi](https://github.com/codefresh-io/kcfi) (ver. **0.5.15** is required) use: + +`kcfi images push --help` + +> Prerequisites: sa.json to access Codefresh Enterprise GCR + +`kcfi images push --codefresh-registry-secret sa.json --images-list images-list-v1.2.14 --registry localhost:5000 --user "root" --password "root"` + +- To push images via [push-to-registry.sh](https://github.com/codefresh-io/onprem-images/blob/master/push-to-registry.sh) script use (see [prerequisites](https://github.com/codefresh-io/onprem-images#prerequesites)): + +`./push-to-registry.sh localhost:5000 v1.2.14` + +#### Install/Upgrade Codefresh with private docker registry config** + +Set `usePrivateRegistry: true`, and set privateRegistry address, username and password in `config.yaml`. 
+ +For Bitnami helm charts ([consul](https://github.com/bitnami/charts/blob/main/bitnami/consul/values.yaml), [nats](https://github.com/bitnami/charts/blob/main/bitnami/nats/values.yaml), [redis](https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml), [rabbitmq](https://github.com/bitnami/charts/blob/main/bitnami/rabbimq/values.yaml)) define `global.imageRegistry`. + +For [ingress-nginx](https://github.com/kubernetes/ingress-nginx/blob/main/charts/ingress-nginx/values.yaml) chart define `ingress-nginx.controller.image.registry`. + + +`config.yaml` + +```yaml +global: + imageRegistry: myregistry.domain.com + +ingress-nginx: + controller: + image: + registry: myregistry.domain.com + +images: + codefreshRegistrySa: sa.json + usePrivateRegistry: true + privateRegistry: + address: myregistry.domain.com + username: + password: +``` + \ No newline at end of file diff --git a/_docs/installation/codefresh-on-prem.md b/_docs/installation/codefresh-on-prem.md new file mode 100644 index 000000000..f314b7f02 --- /dev/null +++ b/_docs/installation/codefresh-on-prem.md @@ -0,0 +1,1237 @@ +--- +title: "Codefresh On-Prem Installation & Configuration" +description: "Use the Kubernetes Codefresh Installer to install the Codefresh On-Premises platform " +group: installation +redirect_from: + - /docs/enterprise/codefresh-on-prem/ +toc: true +--- + + +This article will guide you through the installation of the Codefresh platform on your on-prem environment. This article covers all aspects of installation and configuration. Please read the article carefully before installing Codefresh. + +[kcfi](https://github.com/codefresh-io/kcfi) (the Kubernetes Codefresh Installer) is a one-stop-shop for this purpose. Even though Codefresh offers multiple tools to install components, `kcfi` aggregates all of them into a single tool. + +## Survey: What Codefresh needs to know + +Fill out this survey before the installation to make sure your on-prem environment is ready for deployment: + +[Survey](https://docs.google.com/forms/d/e/1FAIpQLSf18sfG4bEQuwMT7p11F6q70JzWgHEgoAfSFlQuTnno5Rw3GQ/viewform) + +## On-prem system requirements + +{: .table .table-bordered .table-hover} +| Item | Requirement | +| -------------- | -------------- | +|Kubernetes cluster | Server versions v1.19 through v1.22. {::nomarkdown}
      Note: Maintenance support for Kubernetes v1.19 ended on Oct 28, 2021.{:/}| +|Operating systems|{::nomarkdown}
      • Windows 10/7
      • Linux
      • OSX
{:/}|
+|Node requirements| {::nomarkdown}
        • Memory: 5000 MB
        • CPU: 2
        {:/}| +|Git providers |{::nomarkdown}
        • GitHub: SaaS and on-premises versions
        • Bitbucket: SaaS and Bitbucket Server (on-premises) version 5.4.0 and above
        • GitLab: SaaS and on-premises versions (API v4 only)
        {:/}| +|Node size | {::nomarkdown}
        • Single node: 8 CPU cores and 16 GB RAM
        • Multi node: master(s) + 3 nodes with 4 CPU cores and 8 GB RAM each (24 GB in total)
        {:/}| + + + +## Prerequisites + +### Service Account file +The GCR Service Account JSON file, `sa.json` is provided by Codefresh. Contact support to get the file before installation. + +### Default app credentials +Also provided by Codefresh. Contact support to get them file before installation. + +### TLS certificates +For a secured installation, we highly recommend using TLS certificates. Make sure your `ssl.cert` and `private.key` are valid. + +> Use a Corporate Signed certificate, or any valid TLS certificate, for example, from lets-encrypt. + +### Interent connections +We require outbound internet connections for these services: +* GCR to pull platform images +* Dockerhub to pull pipeline images + + +## Security Constraints + +Codefresh has some security assumptions about the Kubernetes cluster it is installed on. + +### RBAC for Codefresh + +The Codefresh installer should be run with a Kubernetes RBAC role that allows object creation in a single namespace. If, by corporate policy, you do not allow the creation of service accounts or roles, a Kubernetes administrator will need to create the role, service account, and binding as shown below. + +>Users with the `codefresh-app` role cannot create other roles or role bindings. + +`codefresh-app-service-account.yaml` +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: codefresh-app + namespace: codefresh +``` + +`codefresh-app-role.yaml` +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: codefresh-app + namespace: codefresh +rules: +- apiGroups: + - "" + - apps + - codefresh.io + - autoscaling + - extensions + - batch + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.k8s.io + - route.openshift.io + - policy + resources: + - routes + - ingresses + - poddisruptionbudgets + verbs: + - '*' +``` + +`codefresh-app-roleBinding.yaml` +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: codefresh + name: codefresh-app-binding + namespace: codefresh +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: codefresh-app +subjects: +- kind: ServiceAccount + name: codefresh-app +``` + +To apply these changes, run: + +``` +kubectl apply -f [file] +``` + +### Operator CRD + +If, due to security rules you are not allowed to create a CRD for a client running `kcfi`, have an Administrator create the RBAC (as instructed above) and the CRD as follows: + +`codefresh-crd.yaml` +```yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: codefreshes.codefresh.io + labels: + app: cf-onprem-operator +spec: + group: codefresh.io + names: + kind: Codefresh + listKind: CodefreshList + plural: codefreshes + singular: codefresh + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha1 + served: true + storage: true +``` + +To apply these changes, run: +``` +kubectl apply -f codefresh-crd.yaml +``` + +You will also need to modify the `config.yaml` for `kcfi` by setting `skipCRD: true` and `serviceAccountName: codefresh-app`: + +`config.yaml` +```yaml + operator: + #dockerRegistry: gcr.io/codefresh-enterprise + #image: codefresh/cf-onprem-operator + #imageTag: + serviceAccountName: codefresh-app + skipCRD: true +``` + +## Install the Codefresh Platform + +### Before you begin + +### Step1 : Download and extract `kcfi` +Download the binary for `kcfi`. It is a single binary without dependencies. + +1. 
Download the binary from [GitHub](https://github.com/codefresh-io/kcfi/releases){:target="\_blank"}. + >Note: Darwin is for OSX +1. Extract the downloaded file. +1. Copy the file to your $PATH: `cp /path/to/kcfi /usr/local/bin` + +### Step 2: Set the current context +* Make sure you have a `kubeconfig` file with the correct context, as in this example: + +``` +kubectl config get-contexts # display list of contexts +kubectl config use-context my-cluster-name # set the default context to my-cluster-name +kubectl config current-context # verify the current-context` +``` +### Step 3: Initialize and configure `config.yaml` +Prepare the platform for installation by initializing the directory with `config.yaml`. Then edit `config.yaml` and configure all installation settings, including files and directories required, and then deploy to Kubernetes. + +The `config.yaml` is includes descriptions for every parameter. + +1. Create the directory with the `config.yaml`: + +``` +kcfi init codefresh [-d /path/to/stage-dir] +``` +1. Below `installer`, define your installation method as either Helm or Codefresh CRD: + +```yaml + installer: + # type: + # "operator" - apply codefresh crd definition + # "helm" - install/upgrade helm chart from client +``` +1. If you are installing Codefresh in an air-gapped environment (without access to public Docker Hub or codefresh-enterprise registry), copy the images to your organization container registry (Kubernetes will pull the images from it as part of the installation). + + 1. Set `usePrivateRegistry` to `true`. + 1. Define `privateRegistry` `address`, `username` and `password`. + + +```yaml +images: + codefreshRegistrySa: sa.json + # usePrivateRegistry: false + # privateRegistry: + # address: + # username: + # password: + lists: + - images/images-list +``` +1. Push all or a single image: + * All images: + ``` + kcfi images push [-c|--config /path/to/config.yaml] + ``` + * Single image: + ``` + kcfi images push [-c|--config /path/to/config.yaml] [options] repo/image:tag [repo/image:tag] + ``` + + > To get the full list of options, run `kcfi images --help`. + + >Even if you are running a Kubernetes cluster with outgoing access to the public internet, note that Codefresh platform images are not public and can be obtained by using `sa.json` file provided by Codefresh support personnel. + Use the flag `--codefresh-registry-secret` to pass the path to the file `sa.json`. + +### Step 4: (Optional) Configure TLS certificates +If you are using TLS, enable it in `config.yaml`. + +1. Set `tls.selfSigned =false`. +1. Place both `ssl.crt` and `private.key` into certs/ directory. + +### Step 5: Deploy On-premises platform + +1. Run: + +``` +kcfi deploy [ -c config.yaml ] [ --kube-context ] [ --atomic ] [ --debug ] [ helm upgrade parameters ] +``` +### Step 6: Install the Codefresh Kubernetes Agent + +Install the `cf-k8s-agent` on a cluster separate from the installer, or in a different namespace on the same cluster. +The `cf-k8s-agent` accesses Kubernetes resources (pods, deployments, services, etc.) behind the firewall to display them in the Codefresh UI. The agent streams updates from cluster resources and then sends information updates to the `k8s-monitor` service. + +1. Create a staging directory for the agent: + +``` +kcfi init k8s-agent +``` + A staging directory is created, named k8s-agent with a `config.yaml`. +1. Edit k8s-agent/config.yaml ?? for what?? + +1. 
Run: + +``` +kcfi deploy [ -c config.yaml ] [-n namespace] +``` + where: + [namespace] is the namespace if you are installing the agent in the same cluster. + + + + +## High-Availability (HA) with active-passive clusters +Enable high-availability in the Codefresh platform for disaster recovery with an active-passive cluster configuration. +Review the prerequisites, and then do the following to configure high-availability: +* For new installations, install Codefresh on the active cluster +* Install Codefresh on the passive cluster +* When needed, switch between clusters for disaster recovery + +### Prerequisites + +* **K8s clusters** + Two K8s clusters, one designated as the active cluster, and the other designated as the passive cluster for disaster recovery. + +* **External databases and services** + Databases and services external to the clusters. + + * Postgres database (see [Configuring an external Postgres database](#configuring-an-external-postgres-database)) + * MongoDB (see [Configuring an external MongoDB](#configuring-an-external-mongodb)) + * Redis service (see [Configuring an external Redis service](#configure-an-external-redis-service)) + * RabbitMQ service (see [Configuring an external RabbitMQ service](#configure-an-external-redis-service)) + * Consul service (see [Configuring an external Consul service](#configuring-an-external-consul-service)) + +* **DNS record** + To switch between clusters for disaster recovery + +### Install Codefresh on active cluster + +If you are installing Codefresh for the first time, install Codefresh on the cluster designated as the _active_ cluster. +See [Installing the Codefresh platform]({{site.baseurl}}/docs/administration/codefresh-on-prem/#install-the-codefresh-platform). + +### Install Codefresh on passive cluster + +First get the `values.yaml` file from the current Codefresh installation on the active cluster. Then install Codefresh on the passive cluster using Helm. + +**1. Get values.yaml** +1. Switch your kube context to the active cluster. +1. Get `values.yaml` from the active cluster: + `helm get values ${release_name} -n ${namespace} > cf-passive-values.yaml` + where: + `{release-version}` is the name of the Codefresh release, and is by default `cf`. + `${namespace}` is the namespace with the Codefresh release, and is by default `codefresh`. + +{:start="3"} +1. Update the required variables in `cf-passive-values.yaml`. + > If the variables do not exist, add them to the file. + + * In the `global` section, disable `seedJobs` by setting it to `false`: + + ```yaml + global: + seedJobs: false + ``` + + * Add variable `FREEZE_WORKFLOWS_EXECUTION` to `cfapi`, and set it to `true`. + + ```yaml + cfapi: + env: + FREEZE_WORKFLOWS_EXECUTION: true + ``` + +**2. Install Codefresh on passive cluster** + +1. Download the Helm chart: + `helm repo add codefresh-onprem https://chartmuseum.codefresh.io/codefresh` + `helm fetch codefresh-onprem/codefresh --version ${release-version}` + where: + `{release-version}` is the version of Codefresh you are downloading. + +1. Unzip the Helm chart: + `tar -xzf codefresh-${release-version}.tgz` +1. Go to the folder where you unzipped the Helm chart. +1. Install Codefresh with the Helm command using `cf-passive-values.yaml`: + `helm install cf . -f ${path}/cf-passive-values.yaml -n codefresh` + + +### Switch between clusters for disaster recovery + +For disaster recovery, switch between the active and passive clusters. + +1. 
In the `cfapi` deployment on the _active_ cluster, change the value of `FREEZE_WORKFLOWS_EXECUTION` from `false` to `true`. + If the variable does not exist, add it, and make sure the value is set to `true`. +1. In the `cfapi` deployment on the _passive_ cluster, change the value of `FREEZE_WORKFLOWS_EXECUTION` from `true` to `false`. +1. Switch DNS from the currently active cluster to the passive cluster. + +### Services without HA + +The following services cannot run in HA, but are not critical in case of downtime or during the process of switchover from active to passive. +These services are not considered critical as they are part of build-handling. In case of failure, a build retry occurs, ensuring that the build is always handled. +* `cronus` +* `cf-sign` + + +## Additional configuration + +After you install Codefresh, these are post-installation operations that you should follow. + +### Selectively enable SSO provider for account +As a Codefresh administrator, you can select the providers you want to enable for SSO in your organization, for both new and existing accounts. +You can always renable a provider when needed. + + +1. Sign in as Codefresh admin. +1. From the left pane, select **Providers**. +1. Disable the providers not relevant for the accounts. +These providers are not displayed as options during sign-up/sign-in. + + +### (Optional) Set up Git integration + +Codefresh supports out-of-the-box Git logins using your local username and password, or logins using your Git provider, as described below.You can also configure login to supported SSO providers after installation, as described in [Setting up OpenID Connect (OIDC) Federated Single Sign-On (SSO)]({{site.baseurl}}/docs/administration/single-sign-on/oidc). + +If you’d like to set up a login to Codefresh using your Git provider, first login using the default credentials (username: `AdminCF`, password: `AdminCF` and add your Git provider OAuth integration details in our admin console: + +**Admin Management** > **IDPs** tab + +To get the Client ID and Client Secret for each of the supported Git providers, follow the instructions according to your VCS provider. + +#### GitHub Enterprise + +Navigate to your GitHub organization settings: https://github.com/organizations/your_org_name/settings. + +On the left-hand side, under **Developer settings**, select **OAuth Apps**, and click **Register an Application**. + +Complete the OAuth application registration as follows: + +- **Application name:** codefresh-on-prem (or a significant name) +- **Homepage URL:** https://your-codefresh-onprem-domain +- **Authorization callback URL:** https://your-codefresh-onprem-domain/api/auth/github/callback + +After registration, note down the created Client ID and Client Secret. They will be required for the settings in **Codefresh Admin**->**IDPs** + +#### GitLab + +Navigate to your Applications menu in GitLab User Settings: https://gitlab.com/profile/applications + +Complete the application creation form as follows: + +- **Name:** codefresh-onprem (or a significant name) +- **Redirect URI:** https://your-codefresh-onprem-domain/api/auth/gitlab/callback +- **Scopes (permissions):** + - API + - read_user + - read_registry + +Click **Save application**. + +After app creation, note down the created Application ID and Client Secret. They will be required for the settings in **Codefresh Admin**->**IDPs**. 
+ +{% include image.html + lightbox="true" + file="/images/installation/git-idp.png" + url="/images/installation/git-idp.png" + %} + +>Note: When configuring the default IDP (for GitHub, GitLab, etc), do not modify the Client Name field. Please keep them as GitHub, GitLab, BitBucket, etc. Otherwise, the signup and login views won’t work. + +### Proxy Configuration + +If your environment resides behind HTTP proxies, you need to uncomment the following section in `config.yaml`: + +```yaml +global: + env: + HTTP_PROXY: "http://myproxy.domain.com:8080" + http_proxy: "http://myproxy.domain.com:8080" + HTTPS_PROXY: "http://myproxy.domain.com:8080" + https_proxy: "http://myproxy.domain.com:8080" + NO_PROXY: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-nginx-controller,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis-master,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" + no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-nginx-controller,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis-master,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" +``` +In addition to this, you should also add your Kubernetes API IP address (`kubectl get svc kubernetes`) to both: `NO_PROXY` and `no_proxy`. + +### Storage + +Codefresh is using both cluster storage (volumes) as well as external storage. + +#### Databases + +The following table displays the list of databases created as part of the installation: + +| Database | Purpose | Latest supported version | +|----------|---------| ---------------| +| mongoDB | storing all account data (account settings, users, projects, pipelines, builds etc.) | 4.2.x | +| postgresql | storing data about events that happened on the account (pipeline updates, deletes, etc.). The audit log uses the data from this database. | 13.x | +| redis | mainly used for caching, but also used as a key-value store for our trigger manager. 
| 6.0.x | + +#### Volumes + +These are the volumes required for Codefresh on-premises: + + +{: .table .table-bordered .table-hover} +| Name | Purpose | Minimum Capacity | Can run on netfs (nfs, cifs) | +|----------------|------------------------|------------------|------------------------------| +| cf-mongodb* | Main database - Mongo | 8GB | Yes** | +| cf-postgresql* | Events databases - Postgres | 8GB | Yes** | +| cf-rabbitmq* | Message broker | 8GB | No** | +| cf-redis* | Cache | 8GB | No** | +| cf-store | Trigger Redis data | 8GB | No** | +| cf-cronus | Trigger crontab data | 1GB | Yes | +| datadir-cf-consul-0 | Consul datadir | 1GB | Yes | +| cf-chartmuseum | chartmuseum | 10GB | Yes | +| cf-builder-0 | /var/lib/docker for builder | 100GB | No*** | +| cf-runner-0 | /var/lib/docker for composition runner | 100GB | No*** | + +{% raw %} + + (*) Possibility to use external service + + (**) Running on netfs (nfs, cifs) is not recommended by product admin guide + + (***) Docker daemon can be run on block device only + +{% endraw %} + +StatefulSets (`cf-builder` and `cf-runner`) process their data on separate physical volumes (PVs) and can be claimed using Persistent Volume Claims (PVCs) with default initial sizes of 100Gi. Also, those StatefulSets have the ability to connect to existing pre-defined PVCs. + +The default initial volume size (100 Gi) can be overridden in the custom `config.yaml` file. Values descriptions are in the `config.yaml` file. +The registry’s initial volume size is 100Gi. It also can be overridden in a custom `config.yaml` file. There is a possibility to use a customer-defined registry configuration file (`config.yaml`) that allows using different registry storage back-ends (S3, Azure Blob, GCS, etc.) and other parameters. More details can be found in the [Docker documentation](https://docs.docker.com/registry/configuration/). + +Depending on the customer’s Kubernetes version we can assist with PV resizing. Details are can be found in this [Kubernetes blog post](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/). + +#### Automatic Volume Provisioning + +Codefresh installation supports automatic storage provisioning based on the standard Kubernetes dynamic provisioner Storage Classes and Persistent Volume Claims. All required installation volumes will be provisioned automatically using the default Storage Class or custom Storage Class that can be specified as a parameter in `config.yaml` under `storageClass: my-storage-class`. + + + +### Retention policy for Codefresh builds +Define a retention policy to manage Codefresh builds. The retention settings are controlled through `cf-api` deployment environment variables, all of which have default settings which you can retain or customize. By default, Codefresh deletes builds older than six months, including offline logs. + +The retention mechanism, implemented as a Cron Job, removes data from collections such as: +* workflowproccesses +* workflowrequests +* workflowrevisions + +{: .table .table-bordered .table-hover} +| Env Variable | Description | Default | +|---------------|--------------------------- |---------------------- | +|`RETENTION_POLICY_IS_ENABLED` | Determines if automatic build deletion through the Cron job is enabled. | `true` | +|`RETENTION_POLICY_BUILDS_TO_DELETE`| The maximum number of builds to delete by a single Cron job. To avoid database issues, especially when there are large numbers of old builds, we recommend deleting them in small chunks. 
You can gradually increase the number after verifying that performance is not affected. | `50` |
+|`RETENTION_POLICY_DAYS` | The number of days for which to retain builds. Builds older than the defined retention period are deleted. | `180` |
+|`RUNTIME_MONGO_URI` | Optional. The URI of the Mongo database from which to remove MongoDB logs (in addition to the builds). | |
+
+
+### Managing Codefresh backups
+
+Codefresh on-premises backups can be automated by installing a dedicated service as an add-on to your Codefresh on-premises installation. It is based on the [mgob](https://github.com/stefanprodan/mgob){:target="\_blank"} open source project, and can run scheduled backups with retention, S3 & SFTP upload, notifications, instrumentation with Prometheus, and more.
+
+#### Configure and deploy the Backup Manager
+
+Backup Manager is installed as an add-on, and therefore requires an existing Codefresh on-premises installation.
+Before installing it, make sure your current kube context points to the cluster where Codefresh is installed.
+
+1. Go to the staging directory of your Codefresh installation, and open the config file: `your-CF-stage-dir/addons/backup-manager/config.yaml`.
+1. Retain or customize the values of these configuration parameters:
+  * `metadata`: Various CF-installer-specific parameters, which should not be changed in this case
+  * `kubernetes`: Specify a kube context, kube config file, and a namespace for the backup manager
+  * `storage`: Storage class, storage size, and read modes for persistent volumes to store backups locally within your cluster
+  * Backup plan configuration parameters under `jobConfigs.cfBackupPlan`:
+    * `target.uri` - the target Mongo URI. We recommend leaving this value blank; it is taken automatically from the Codefresh release installed in your cluster
+    * `scheduler` - the cron expression for the backup schedule, as well as backup retention and timeout values
+
+For more advanced backup plan settings, such as specifying remote cloud-based storage providers for your backups or configuring notifications, see the [mgob configuration](https://github.com/stefanprodan/mgob#configure) documentation.
+
+To **deploy the Backup Manager** service, select the kube context of the cluster where Codefresh on-premises is installed, and run:
+
+```
+kcfi deploy -c your-CF-stage-dir/addons/backup-manager/config.yaml
+```
+
+#### On-demand/ad-hoc backup
+```
+kubectl port-forward cf-backup-manager-0 8090
+curl -X POST http://localhost:8090/backup/cfBackupPlan
+```
+
+#### Restore from backup
+```
+kubectl exec -it cf-backup-manager-0 bash
+mongorestore --gzip --archive=/storage/cfBackupPlan/backup-archive-name.gz --uri mongodb://root:password@mongodb:27017 --drop
+```
+
+### Configuring AWS Load Balancers
+
+By default, Codefresh deploys the [ingress-nginx](https://github.com/kubernetes/ingress-nginx/) controller and a [Classic Load Balancer](https://docs.aws.amazon.com/eks/latest/userguide/load-balancing.html) as a controller service.
+
+#### NLB
+
+To use a **Network Load Balancer**, deploy a regular Codefresh installation with the following ingress config for the `cf-ingress-controller` controller service.
+ +`config.yaml` +```yaml +ingress-nginx: + controller: + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '60' + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' + +tls: + selfSigned: false + cert: certs/certificate.crt + key: certs/private.key +``` +This annotation will create a new Load Balancer - Network Load Balancer, which you should use in the Codefresh UI DNS record. +Update the DNS record according to the new service. + +#### L7 ELB with SSL Termination + +When a **Classic Load Balancer** is used, some Codefresh features that (for example `OfflineLogging`), will use a websocket to connect with Codefresh API and they will require secure TCP (SSL) protocol enabled on the Load Balancer listener instead of HTTPS. + +To use either a certificate from a third party issuer that was uploaded to IAM or a certificate [requested](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) within AWS Certificate Manager see the followning config example: + + +`config.yaml` +```yaml +ingress-nginx: + controller: + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: < CERTIFICATE ARN > + targetPorts: + http: http + https: http + +tls: + selfSigned: true +``` + +- both http and https target port should be set to **80**. +- update your AWS Load Balancer listener for port 443 from HTTPS protocol to SSL. + +#### ALB + +To use the **Application Load Balancer** the [ALB Ingress Controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) should be deployed to the cluster. + +To support ALB: + +- First disable Nginx controller in the Codefresh init config file - __config.yaml__: + +```yaml +ingress-nginx: #disables creation of Nginx controller deployment + enabled: false + +ingress: #disables creation of Ingress object + enabled: false +``` + +- [deploy](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) the ALB controller; +- create a new **ingress** resource: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + kubernetes.io/ingress.class: alb + meta.helm.sh/release-name: cf + meta.helm.sh/release-namespace: codefresh + labels: + app: cf-codefresh + release: cf + name: cf-codefresh-ingress + namespace: codefresh +spec: + defaultBackend: + service: + name: cf-cfui + port: + number: 80 + rules: + - host: myonprem.domain.com + http: + paths: + - backend: + service: + name: cf-cfapi + port: + number: 80 + path: /api/* + pathType: ImplementationSpecific + - backend: + service: + name: cf-cfapi + port: + number: 80 + path: /ws/* + pathType: ImplementationSpecific + - backend: + service: + name: cf-cfui + port: + number: 80 + path: / + pathType: ImplementationSpecific +``` + +### Configure CSP (Content Security Policy) +Add CSP environment variables to `config.yaml`, and define the values to be returned in the CSP HTTP headers. 
+```yaml +cfui: + env: + CONTENT_SECURITY_POLICY: "" + CONTENT_SECURITY_POLICY_REPORT_ONLY: "default-src 'self'; font-src 'self' + https://fonts.gstatic.com; script-src 'self' https://unpkg.com https://js.stripe.com; + style-src 'self' https://fonts.googleapis.com; 'unsafe-eval' 'unsafe-inline'" + CONTENT_SECURITY_POLICY_REPORT_TO: "" +``` +`CONTENT_SECURITY_POLICY` is the string describing content policies. Use semi-colons to separate between policies. +`CONTENT_SECURITY_POLICY_REPORT_TO` is a comma-separated list of JSON objects. Each object must have a name and an array of endpoints that receive the incoming CSP reports. + +For detailed information, see the [Content Security Policy article on MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP). + +### Enable x-hub-signature-256 signature for GitHub AE +Add the `USE_SHA256_GITHUB_SIGNATURE` environment variable to **cfapi** deployment in `config.yaml`. +```yaml +cfapi: + env: + USE_SHA256_GITHUB_SIGNATURE: "true" +``` + +For detailed information, see the [Securing your webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/securing-your-webhooks) and [Webhooks](https://docs.github.com/en/github-ae@latest/rest/webhooks). + + +## Using existing external services for data storage/messaging + +Normally the Codefresh installer, is taking care of all needed dependencies internally by deploying the respective services (mongo, redis etc) on its own. + +You might want however to use your own existing options if you already have those services up and running externally. + +### Configuring an external Postgres database + +It is possible to configure Codefresh to work with your existing Postgres database service, if you don't want to use the default one as provided by the Codefresh installer. + +#### Configuration steps + +All the configuration comes down to putting a set of correct values into your Codefresh configuration file `config.yaml`, which is present in `your/stage-dir/codefresh` directory. During the installation, Codefresh will run a seed job, using the values described in the following steps: + +1. Specify a user name `global.postgresSeedJob.user` and password `global.postgresSeedJob.password` for a seed job. This must be a privileged user allowed to create databases and roles. It will be used only by the seed job to create the needed database and a user. +2. Specify a user name `global.postgresUser` and password `global.postgresPassword` to be used by Codefresh installation. A user with the name and password will be created by the seed job and granted with required privileges to access the created database. +3. Specify a database name `global.postgresDatabase` to be created by the seed job and used by Codefresh installation. +4. Specify `global.postgresHostname` and optionally `global.postgresPort` (`5432` is a default value). +5. Disable the postgres subchart installation with the `postgresql.enabled: false` value, because it is not needed in this case. 
+ + +Below is an example of the relevant piece of `config.yaml`: + +```yaml +global: + postgresSeedJob: + user: postgres + password: zDyGp79XyZEqLq7V + postgresUser: cf_user + postgresPassword: fJTFJMGV7sg5E4Bj + postgresDatabase: codefresh + postgresHostname: my-postgres.ccjog7pqzunf.us-west-2.rds.amazonaws.com + postgresPort: 5432 + +postgresql: + enabled: false #disable default postgresql subchart installation +``` +#### Running the seed job manually + +If you prefer running the seed job manually, you can do it by using a script present in `your/stage-dir/codefresh/addons/seed-scripts` directory named `postgres-seed.sh`. The script takes the following set of variables that you need to have set before running it: + +```shell +export POSTGRES_SEED_USER="postgres" +export POSTGRES_SEED_PASSWORD="zDyGp79XyZEqLq7V" +export POSTGRES_USER="cf_user" +export POSTGRES_PASSWORD="fJTFJMGV7sg5E4Bj" +export POSTGRES_DATABASE="codefresh" +export POSTGRES_HOST="my-postgres.ccjog7pqzunf.us-west-2.rds.amazonaws.com" +export POSTGRES_PORT="5432" +``` +The variables have the same meaning as the configuration values described in the previous section about Postgres. + +However you **still need to specify a set of values** in the Codefresh config file as described in the section above, but with the whole **`postgresSeedJob` section omitted**, like this: + +```yaml +global: + postgresUser: cf_user + postgresPassword: fJTFJMGV7sg5E4Bj + postgresDatabase: codefresh + postgresHostname: my-postgresql.prod.svc.cluster.local + postgresPort: 5432 + +postgresql: + enabled: false #disable default postgresql subchart installation +``` + +### Configuring an external MongoDB + +Codefresh recommends to use the Bitnami MongoDB [chart](https://github.com/bitnami/charts/tree/master/bitnami/mongodb) as a Mongo database. The supported version of Mongo is 4.2.x + +To configure Codefresh on-premises to use an external Mongo service one needs to provide the following values in `config.yaml`: + +- **mongo connection string** - `mongoURI`. This string will be used by all of the services to communicate with mongo. Codefresh will automatically create and add a user with "ReadWrite" permissions to all of the created databases with the username and password from the URI. Optionally, automatic user addition can be disabled - `mongoSkipUserCreation`, in order to use already existing user. In such a case the existing user must have **ReadWrite** permissions to all of newly created databases +Codefresh does not support [DNS Seedlist Connection Format](https://docs.mongodb.com/manual/reference/connection-string/#connections-dns-seedlist) at the moment, use the [Standard Connection Format](https://docs.mongodb.com/manual/reference/connection-string/#connections-standard-connection-string-format) instead. +- mongo **root user** name and **password** - `mongodbRootUser`, `mongodbRootPassword`. The privileged user will be used by Codefresh only during installation for seed jobs and for automatic user addition. After installation, credentials from the provided mongo URI will be used. Mongo root user must have permissions to create users. + +See the [Mongo required Access](https://docs.mongodb.com/manual/reference/method/db.createUser/#required-access) for more details. 
+
+Here is an example of all the related values:
+
+```yaml
+global:
+  mongodbRootUser:
+  mongodbRootPassword:
+  mongoURI:
+  mongoSkipUserCreation: true
+  mongoDeploy: false # disables deployment of internal mongo service
+
+mongo:
+  enabled: false
+```
+
+#### MongoDB with Mutual TLS
+
+>This option is available in kcfi **v0.5.10**.
+
+Codefresh supports enabling SSL/TLS between Codefresh microservices and MongoDB. To enable this option, specify the following parameters in `config.yaml`:
+
+  `global.mongoTLS: true`
        + `global.mongoCaCert` - CA certificate file path (in kcfi init directory)
        + `global.mongoCaKey` - CA certificate private key file path (in kcfi init directory) + +`config.yaml` example: +```yaml +global: + mongodbRootUser: root + mongodbRootPassword: WOIqcSwr0y + mongoURI: mongodb://my-mongodb.prod.svc.cluster.local/?ssl=true&authMechanism=MONGODB-X509&authSource=$external + mongoSkipUserCreation: true + mongoDeploy: false # disables deployment of internal mongo service + + mongoTLS: true #enable MongoDB TLS support + mongoCaCert: mongodb-ca/ca-cert.pem + mongoCaKey: mongodb-ca/ca-key.pem + + ### for OfflineLogging feature + runtimeMongoURI: mongodb://my-mongodb.prod.svc.cluster.local/?ssl=true&authMechanism=MONGODB-X509&authSource=$external + +### for OfflineLogging feature +cfapi: + env: + RUNTIME_MONGO_TLS: "true" + RUNTIME_MONGO_TLS_VALIDATE: "true" # 'false' if self-signed certificate to avoid x509 errors + +## set MONGO_MTLS_VALIDATE to `false` if self-signed certificate to avoid x509 errors +cluster-providers: + env: + MONGO_MTLS_VALIDATE: "false" + +k8s-monitor: + env: + MONGO_MTLS_VALIDATE: "false" + +mongo: + enabled: false #disable default mongodb subchart installation + ``` + + >Perform an upgarde:
        + >`kcfi deploy -c config.yaml --debug` + +### Configure an external Redis service +Codefresh recommends to use the Bitnami Redis [chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) as a Redis store. + +**Limitations** + +Codefresh does not support secure connection to Redis (TLS) and AUTH username extension. + +**Configuration** + +To configure Codefresh to use an external Redis service, add the following parameters to your `config.yaml`: + +`config.yaml` example: +```yaml +global: + redisUrl: my-redis.prod.svc.cluster.local + redisPort: 6379 + redisPassword: 6oOhHI8fI5 + + runtimeRedisHost: my-redis.prod.svc.cluster.local + runtimeRedisPassword: 6oOhHI8fI5 + runtimeRedisPort: 6379 + runtimeRedisDb: 2 + +redis: + enabled: false #disable default redis subchart installation +``` + +Where `redis*` - are for the main Redis storage, and `runtimeRedis*` - for storage is used to store pipeline logs in case of `OfflineLogging` feature is turned on. In most cases the host value is the same for these two values. + + +### Configuring an external RabbitMQ service + +Codefresh recommends to use the Bitnami RabbitMQ [chart](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq) as a RabbitMQ service. + +To use an external RabbitMQ service instead of the local helm chart, add the following values to the __config.yaml__: + +```yaml +rabbitmq: + enabled: false + +global: + rabbitmqUsername: + rabbitmqPassword: + rabbitmqHostname: +``` + +### Configuring an external Consul service + + +Notice that at the moment Codefresh supports only the deprecated Consul API (image __consul:1.0.0__), and does not support connection via HTTPS and any authentication. +The Consul host must expose port `8500`. + +>In general, we don't recommend to take the Consul service outside the cluster. + + +To configure Codefresh to use your external Consul service, add the following values to the __config.yaml__: + +```yaml +global: + consulHost: + +consul: + enabled: false +``` + +## App Cluster Autoscaling + +Autoscaling in Kubernetes is implemented as an interaction between Cluster Autoscaler and Horizontal Pod Autoscaler + +{: .table .table-bordered .table-hover} +| | Scaling Target| Trigger | Controller | How it Works | +| ----------- | ------------- | ------- | --------- | --------- | +| [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)| Nodes | **Up:** Pending pod
**Down:** Node resource allocation is low | On GKE you can turn the autoscaler on/off and configure min/max per node group; it can also be installed separately | Listens for pending pods for scale-up and for node allocations for scale-down. Must have permissions to call the cloud API. Considers pod affinity, PDBs, storage, and special annotations |
+| [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | Replicas on Deployments or StatefulSets | Metric value thresholds defined in the HPA object | Part of the Kubernetes controller manager | The controller gets metrics from "metrics.k8s.io/v1beta1", "custom.metrics.k8s.io/v1beta1", and "external.metrics.k8s.io/v1beta1"; it requires [metrics-server](https://github.com/kubernetes-sigs/metrics-server) and custom metrics adapters ([prometheus-adapter](https://github.com/kubernetes-sigs/prometheus-adapter), [stackdriver-adapter](https://github.com/GoogleCloudPlatform/k8s-stackdriver/tree/master/custom-metrics-stackdriver-adapter)) to serve this API (see note (1) below), and adjusts Deployment or StatefulSet replicas according to the definitions in the HorizontalPodAutoscaler
        There are v1 and beta API versions of HorizontalPodAutoscaler:
        [v1](https://github.com/kubernetes/api/blob/master/autoscaling/v1/types.go) - supports resource metrics (CPU, memory) - `kubectl get hpa`
        [v2beta2](https://github.com/kubernetes/api/blob/master/autoscaling/v2beta2/types.go) and [v2beta1](https://github.com/kubernetes/api/blob/master/autoscaling/v2beta1/types.go) - support both resource and custom metrics - `kubectl get hpa.v2beta2.autoscaling` **The metric value should decrease as new pods are added.**
        *Wrong metrics Example:* request rate
        *Right metrics Example:* average request rate per pod | + +Note (1) +``` +kubectl get apiservices | awk 'NR==1 || $1 ~ "metrics"' +NAME SERVICE AVAILABLE AGE +v1beta1.custom.metrics.k8s.io monitoring/prom-adapter-prometheus-adapter True 60d +v1beta1.metrics.k8s.io kube-system/metrics-server True 84d +``` + + +**Implementation in Codefresh** + +* Default “Enable Autoscaling” settings for GKE +* Using [prometheus-adapter](https://github.com/kubernetes-sigs/prometheus-adapter) with custom metrics + +We define HPA for cfapi and pipeline-manager services + +**CFapi HPA object** + +It's based on three metrics (HPA controller scales of only one of the targetValue reached): + +``` +kubectl get hpa.v2beta1.autoscaling cf-cfapi -oyaml +``` + +{% highlight yaml %} +{% raw %} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + annotations: + meta.helm.sh/release-name: cf + meta.helm.sh/release-namespace: default + labels: + app.kubernetes.io/managed-by: Helm + name: cf-cfapi + namespace: default +spec: + maxReplicas: 16 + metrics: + - object: + metricName: requests_per_pod + target: + apiVersion: v1 + kind: Service + name: cf-cfapi + targetValue: "10" + type: Object + - object: + metricName: cpu_usage_avg + target: + apiVersion: apps/v1 + kind: Deployment + name: cf-cfapi-base + targetValue: "1" + type: Object + - object: + metricName: memory_working_set_bytes_avg + target: + apiVersion: apps/v1 + kind: Deployment + name: cf-cfapi-base + targetValue: 3G + type: Object + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: cf-cfapi-base +{% endraw%} +{% endhighlight %} + +* `requests_per_pod` is based on `rate(nginx_ingress_controller_requests)` metric ingested from nginx-ingress-controller +* `cpu_usage_avg` based on cadvisor (from kubelet) rate `(rate(container_cpu_user_seconds_total)` +* `memory_working_set_bytes_avg` based on cadvisor `container_memory_working_set_bytes` + +**pipeline-manager HPA** + +based on `cpu_usage_avg` + +{% highlight yaml %} +{% raw %} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + annotations: + meta.helm.sh/release-name: cf + meta.helm.sh/release-namespace: default + labels: + app.kubernetes.io/managed-by: Helm + name: cf-pipeline-manager +spec: + maxReplicas: 8 + metrics: + - object: + metricName: cpu_usage_avg + target: + apiVersion: apps/v1 + kind: Deployment + name: cf-pipeline-manager-base + targetValue: 400m + type: Object + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: cf-pipeline-manager-base +{% endraw%} +{% endhighlight %} + +**prometheus-adapter configuration** + +Reference: [https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md +) + +{% highlight yaml %} +{% raw %} +Rules: + - metricsQuery: | + kube_service_info{<<.LabelMatchers>>} * on() group_right(service) + (sum(rate(nginx_ingress_controller_requests{<<.LabelMatchers>>}[2m])) + / on() kube_deployment_spec_replicas{deployment='<>-base',namespace='<>'}) + name: + as: requests_per_pod + matches: ^(.*)$ + resources: + overrides: + namespace: + resource: namespace + service: + resource: service + seriesQuery: kube_service_info{service=~".*cfapi.*"} + - metricsQuery: | + kube_deployment_labels{<<.LabelMatchers>>} * on(label_app) group_right(deployment) + (label_replace( + avg by (container) 
(rate(container_cpu_user_seconds_total{container=~"cf-(tasker-kubernetes|cfapi.*|pipeline-manager.*)", job="kubelet", namespace='<>'}[15m])) + , "label_app", "$1", "container", "(.*)")) + name: + as: cpu_usage_avg + matches: ^(.*)$ + resources: + overrides: + deployment: + group: apps + resource: deployment + namespace: + resource: namespace + seriesQuery: kube_deployment_labels{label_app=~"cf-(tasker-kubernetes|cfapi.*|pipeline-manager.*)"} + - metricsQuery: "kube_deployment_labels{<<.LabelMatchers>>} * on(label_app) group_right(deployment)\n + \ (label_replace(\n avg by (container) (avg_over_time (container_memory_working_set_bytes{container=~\"cf-.*\", + job=\"kubelet\", namespace='<>'}[15m]))\n + \ , \"label_app\", \"$1\", \"container\", \"(.*)\"))\n \n" + name: + as: memory_working_set_bytes_avg + matches: ^(.*)$ + resources: + overrides: + deployment: + group: apps + resource: deployment + namespace: + resource: namespace + seriesQuery: kube_deployment_labels{label_app=~"cf-.*"} + - metricsQuery: | + kube_deployment_labels{<<.LabelMatchers>>} * on(label_app) group_right(deployment) + label_replace(label_replace(avg_over_time(newrelic_apdex_score[15m]), "label_app", "cf-$1", "exported_app", '(cf-api.*|pipeline-manager|tasker-kuberentes)\\[kubernetes\\]'), "label_app", "$1cfapi$3", "label_app", '(cf-)(cf-api)(.*)') + name: + as: newrelic_apdex + matches: ^(.*)$ + resources: + overrides: + deployment: + group: apps + resource: deployment + namespace: + resource: namespace + seriesQuery: kube_deployment_labels{label_app=~"cf-(tasker-kubernetes|cfapi.*|pipeline-manager)"} +{% endraw%} +{% endhighlight %} + +**How to define HPA in Codefresh installer (kcfi) config** + +Most of Codefresh's Microservices subcharts contain `templates/hpa.yaml`: + +{% highlight yaml %} +{% raw %} +{{- if .Values.HorizontalPodAutoscaler }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "cfapi.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "cfapi.fullname" . 
}}-{{ .version | default "base" }} + minReplicas: {{ coalesce .Values.HorizontalPodAutoscaler.minReplicas .Values.replicaCount 1 }} + maxReplicas: {{ coalesce .Values.HorizontalPodAutoscaler.maxReplicas .Values.replicaCount 2 }} + metrics: +{{- if .Values.HorizontalPodAutoscaler.metrics }} +{{ toYaml .Values.HorizontalPodAutoscaler.metrics | indent 4 }} +{{- else }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 +{{- end }} +{{- end }} +{% endraw%} +{% endhighlight %} + +To configure HPA for CFapi add `HorizontalPodAutoscaler` values to config.yaml, for example: + +(assuming that we already have prometheus adapter configured for metrics `requests_per_pod`, `cpu_usage_avg`, `memory_working_set_bytes_avg`) + +{% highlight yaml %} +{% raw %} +cfapi: + replicaCount: 4 + resources: + requests: + memory: "4096Mi" + cpu: "1100m" + limits: + memory: "4096Mi" + cpu: "2200m" + HorizontalPodAutoscaler: + minReplicas: 2 + maxReplicas: 16 + metrics: + - type: Object + object: + metricName: requests_per_pod + target: + apiVersion: "v1" + kind: Service + name: cf-cfapi + targetValue: 10 + - type: Object + object: + metricName: cpu_usage_avg + target: + apiVersion: "apps/v1" + kind: Deployment + name: cf-cfapi-base + targetValue: 1 + - type: Object + object: + metricName: memory_working_set_bytes_avg + target: + apiVersion: "apps/v1" + kind: Deployment + name: cf-cfapi-base + targetValue: 3G +{% endraw%} +{% endhighlight %} + +**Querying metrics (for debugging)** + +CPU Metric API Call + +``` +kubectl get --raw /apis/metrics.k8s.io/v1beta1/namespaces/codefresh/pods/cf-cfapi-base-****-/ | jq +``` + +Custom Metrics Call + +``` +kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1/namespaces/codefresh/services/cf-cfapi/requests_per_pod | jq +``` + + +## Common Problems, Solutions, and Dependencies + +### Dependencies + +#### Mongo + +All services using the MongoDB are dependent on the `mongo` pod being up and running. If the `mongo` pod is down, the following dependencies will not work: + +- `runtime-environment-manager` +- `pipeline-manager` +- `cf-api` +- `cf-broadcaster` +- `context-manager` +- `nomios` +- `cronius` +- `cluster-promoters` +- `k8s-monitor` +- `charts-manager` +- `tasker-kubernetes` + +#### Logs + +There is a dependency between the `cf-broadcaster` pod and the `cf-api` pod. If your pipeline runs, but does not show any logs, try restarting the broadcaster pod. + +### Problems and Solutions + +**Problem:** installer fails because `codefresh` database does not exist. + +**Solution:** If you are using an external PostgresSQL database (instead of the internal one that the installer provides), you will first need to manually create a new database named `codefresh` inside your PostgresSQL database before running the installer. + + diff --git a/_docs/installation/codefresh-runner.md b/_docs/installation/codefresh-runner.md new file mode 100644 index 000000000..a5fde7970 --- /dev/null +++ b/_docs/installation/codefresh-runner.md @@ -0,0 +1,2072 @@ +--- +title: "Codefresh Runner installation" +description: "Run Codefresh pipelines on your private Kubernetes cluster" +group: installation +redirect_from: + - /docs/enterprise/codefresh-runner/ +toc: true +--- + +Install the Codefresh Runner on your Kubernetes cluster to run pipelines and access secure internal services without compromising on-premises security requirements. These pipelines run on your infrastructure, even behind the firewall, and keep code on your Kubernetes cluster secure. 
+ +[Skip to quick installation →](#installation-with-the-quick-start-wizard) + +>Important: + You must install the Codefresh Runner on _each cluster running Codefresh pipelines_. + The Runner is **not** needed in clusters used for _deployment_. You can deploy applications on clusters other than the ones the runner is deployed on. + +The installation process takes care of all Runner components and other required resources (config-maps, secrets, volumes). + +## Prerequisites + +To use the Codefresh runner the following is required: + +1. A Kubernetes cluster with outgoing internet access (versions 1.10 to 1.23). Each node should have 50GB disk size. +2. A container runtime, such as [docker](https://kubernetes.io/blog/2020/12/02/dockershim-faq/), [containerd](https://containerd.io/) or [cri-o](https://cri-o.io/). Note that the runner is **not** dependent on any special dockershim features, so any compliant container runtime is acceptable. The docker socket/daemon used by Codefresh pipelines is **NOT** the one on the host node (as it might not exist at all in the case of containerd or cri-o), but instead an internal docker daemon created/managed by the pipeline itself. +3. A [Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) with the Hybrid feature enabled. +4. A [Codefresh CLI token]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions) that will be used to authenticate your Codefresh account. + +The runner can be installed from any workstation or laptop with access (i.e. via `kubectl`) to the Kubernetes cluster running Codefresh builds. The Codefresh runner will authenticate to your Codefresh account by using the Codefresh CLI token. + +## System Requirements + +Once installed the runner uses the following pods: + +* `runner` - responsible for picking tasks (builds) from the Codefresh API +* `engine` - responsible for running pipelines +* `dind` - responsible for building and using Docker images +* `dind-volume-provisioner` - responsible for provisioning volumes (PV) for dind +* `dind-lv-monitor` - responsible for cleaning **local** volumes + +**CPU/Memory** + +The following table shows **MINIMUM** resources for each component: + +{: .table .table-bordered .table-hover} +| Component | CPU requests| RAM requests | Storage | Type | Always on | +| -------------- | --------------|------------- |-------------------------|-------|-------| +| `runner` | 100m | 100Mi | Doesn't need PV | Deployment | Yes | +| `engine` | 100m | 500Mi | Doesn't need PV | Pod | No | +| `dind` | 400m | 800Mi | 16GB PV | Pod | No | +| `dind-volume-provisioner` | 300m | 400Mi | Doesn't need PV | Deployment | Yes | +| `dind-lv-monitor` | 300m | 400Mi | Doesn't need PV | DaemonSet | Yes | + +Components that are always on consume resources all the time. Components that are not always on only consume resources when pipelines are running (they are created and destroyed automatically for each pipeline). + +Node size and count will depend entirely on how many pipelines you want to be “ready” for and how many will use “burst” capacity. + +* Ready (nodes): Lower initialization time and faster build times. +* Burst (nodes): High initialization time and slower build times. (Not recommended) + +The size of your nodes directly relates to the size required for your pipelines and thus it is dynamic. If you find that only a few larger pipelines require larger nodes you may want to have two Codefresh Runners associated to different node pools. 
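+
+As a quick check of the capacity you currently have available, you can list the allocatable CPU and memory of your nodes with plain `kubectl` (a generic sketch, not a Codefresh-specific command):
+
+```shell
+# Show allocatable CPU and memory per node to gauge how many concurrent pipelines fit
+kubectl get nodes -o custom-columns=NAME:.metadata.name,CPU:.status.allocatable.cpu,MEMORY:.status.allocatable.memory
+```
+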
+ + +**Storage** + +For the storage options needed by the `dind` pod we suggest: + +* [Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local) `/var/lib/codefresh/dind-volumes` on the K8S nodes filesystem (**default**) +* [EBS](https://aws.amazon.com/ebs/) in the case of AWS. See also the [notes](#installing-on-aws) about getting caching working. +* [Local SSD](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/local-ssd) or [GCE Disks](https://cloud.google.com/compute/docs/disks#pdspecs) in the case of GCP. See [notes](#installing-on-google-kubernetes-engine) about configuration. + + +**Networking Requirements** + +* `dind` - this pod will create an internal network in the cluster to run all the pipeline steps; needs outgoing/egress access to Dockerhub and `quay.io` +* `runner` - this pod needs outgoing/egress access to `g.codefresh.io`; needs network access to [app-proxy]({{site.baseurl}}/docs/administration/codefresh-runner/#optional-installation-of-the-app-proxy) (if app-proxy is used) +* `engine` - this pod needs outgoing/egress access to `g.codefresh.io`, `*.firebaseio.com` and `quay.io`; needs network access to `dind` pod + +All CNI providers/plugins are compatible with the runner components. + +## Installation with the Quick-start Wizard + +Install the Codefresh CLI + +```shell +npm install -g codefresh +``` + +[Alternative install methods](https://codefresh-io.github.io/cli/installation/) + +Authenticate the CLI + +```shell +codefresh auth create-context --api-key {API_KEY} +``` + +You can obtain an API Key from your [user settings page](https://g.codefresh.io/user/settings). +>**Note:** Make sure when you generate the token used to authenticate with the CLI, you generate it with *all scopes*. + +>**Note:** access to the Codefresh CLI is only needed once during the Runner installation. After that, the Runner will authenticate on it own using the details provided. You do NOT need to install the Codefresh CLI on the cluster that is running Codefresh pipelines. + +Then run the wizard with the following command: + +```shell +codefresh runner init +``` + +or + +```shell +codefresh runner init --token +``` + +Brefore proceeding with installation, the wizard asks you some basic questions. + +{% include image.html + lightbox="true" + file="/images/administration/runner/installation-wizard.png" + url="/images/administration/runner/installation-wizard.png" + alt="Codefresh Runner wizard" + caption="Codefresh Runner wizard" + max-width="100%" + %} + +The wizard also creates and runs a sample pipeline that you can see in your Codefresh UI. + +{% include image.html + lightbox="true" + file="/images/administration/runner/sample-pipeline.png" + url="/images/administration/runner/sample-pipeline.png" + alt="Codefresh Runner example pipeline" + caption="Codefresh Runner example pipeline" + max-width="90%" + %} + +That's it! You can now start using the Runner. + +You can also verify your installation with: + +```shell +codefresh runner info +``` + +During installation you can see which API token will be used by the runner (if you don't provide one). The printed token is used by the runner to talk to the Codefresh platform carrying permissions that allow the runner to run pipelines. If you save the token, it can later be used to restore the runner's permissions without creating a new runner installation, if the deployment is deleted. 
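+
+If the runner deployment is ever deleted, supplying that same token again lets the re-created runner keep its existing permissions. A minimal sketch, with an illustrative file path (the installer does not create this file for you):
+
+```shell
+# Illustrative only: keep the token printed by the wizard somewhere safe
+echo "<RUNNER_TOKEN>" > ~/.codefresh/runner-token
+
+# Reuse it later so the re-created runner keeps the same permissions
+codefresh runner init --token "$(cat ~/.codefresh/runner-token)"
+```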
+ +**Customizing the Wizard Installation** + +You can customize the wizard installation by passing your own values in the `init` command. +To inspect all available options run `init` with the `--help` flag: + +```shell +codefresh runner init --help +``` + +**Inspecting the Manifests Before they are Installed** + +If you want to see what manifests are used by the installation wizard you can supply the `--dry-run` parameter in the installation process. + +```shell +codefresh runner init --dry-run +``` + +This will execute the wizard in a special mode that will not actually install anything in your cluster. After all configuration questions are asked, all Kubernetes manifests used by the installer will be instead saved locally in a folder `./codefresh_manifests`. + +## Install Codefresh Runner with values file + +To install the Codefresh Runner with pre-defined values file use `--values` flag: + +```shell +codefresh runner init --values values.yaml +``` + +Use [this example](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml) as a starting point for your values file. + +## Install Codefresh Runner with Helm + +To install the Codefresh Runner using Helm, follow these steps: + +1. Download the Codefresh CLI and authenticate it with your Codefresh account. Click [here](https://codefresh-io.github.io/cli/getting-started/) for more detailed instructions. +2. Run the following command to create all of the necessary entities in Codefresh: + + ```shell + codefresh runner init --generate-helm-values-file + ``` + + * This will not install anything on your cluster, except for running cluster acceptance tests, (which may be skipped using the `--skip-cluster-test` option). Please note, that the Runner Agent and the Runtime Environment are still created in your Codefresh account. + * This command will also generate a `generated_values.yaml` file in your current directory, which you will need to provide to the `helm install` command later. If you want to install several Codefresh Runners, you will need a separate `generated_values.yaml` file for each Runner. + +3. Now run the following to complete the installation: + + ```shell + helm repo add cf-runtime https://chartmuseum.codefresh.io/cf-runtime + + helm install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml --create-namespace --namespace codefresh + ``` + * Here is the link to a repository with the chart for reference: [https://github.com/codefresh-io/venona/tree/release-1.0/.deploy/cf-runtime](https://github.com/codefresh-io/venona/tree/release-1.0/.deploy/cf-runtime) + +4. At this point you should have a working Codefresh Runner. You can verify the installation by running: + + ```shell + codefresh runner execute-test-pipeline --runtime-name + ``` +>**Note!**
        +Runtime components' (engine and dind) configuration is determined by the `runner init` command.
        +The `helm install` command can only control the configuration of `runner`, `dind-volume-provisioner` and `lv-monitor` components. + +## Using the Codefresh Runner + +Once installed, the Runner is fully automated. It polls the Codefresh SAAS (by default every 3 seconds) on its own and automatically creates all resources needed for running pipelines. + +Once installation is complete, you should see the cluster of the runner as a new [Runtime environment](https://g.codefresh.io/account-admin/account-conf/runtime-environments) in Codefresh in your *Account Settings*, in the respective tab. + +{% include image.html + lightbox="true" + file="/images/administration/runner/runtime-environments.png" + url="/images/administration/runner/runtime-environments.png" + alt="Available runtime environments" + caption="Available runtime environments" + max-width="60%" + %} + +If you have multiple environments available, you can change the default (shown with a thin blue border) by clicking on the 3 dot menu on the right of each environment. The Codefresh runner installer comes with a `set-default` option that is automatically set by default in the new runtime environment. + +You can even override the runtime environment for a specific pipeline by specifying in the respective section in the [pipeline settings]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/). + +{% include image.html + lightbox="true" + file="/images/administration/runner/environment-per-pipeline.png" + url="/images/administration/runner/environment-per-pipeline.png" + alt="Running a pipeline on a specific environment" + caption="Running a pipeline on a specific environment" + max-width="60%" + %} + +## Checking the Runner + +Once installed, the runner is a normal Kubernetes application like all other applications. You can use your existing tools to monitor it. + +Only the runner pod is long living inside your cluster. All other components (such as the engine) are short lived and exist only during pipeline builds. +You can always see what the Runner is doing by listing the resources inside the namespace you chose during installation: + +```shell +$ kubectl get pods -n codefresh-runtime +NAME READY STATUS RESTARTS AGE +dind-5ee7577017ef40908b784388 1/1 Running 0 22s +dind-lv-monitor-runner-hn64g 1/1 Running 0 3d +dind-lv-monitor-runner-pj84r 1/1 Running 0 3d +dind-lv-monitor-runner-v2lhc 1/1 Running 0 3d +dind-volume-provisioner-runner-64994bbb84-lgg7v 1/1 Running 0 3d +engine-5ee7577017ef40908b784388 1/1 Running 0 22s +monitor-648b4778bd-tvzcr 1/1 Running 0 3d +runner-5d549f8bc5-7h5rc 1/1 Running 0 3d +``` + +In the same manner you can list secrets, config-maps, logs, volumes etc. for the Codefresh builds. + +## Uninstall the Codefresh Runner + +You can uninstall the Codefresh runner from your cluster by running: + +```shell +codefresh runner delete +``` + +A wizard, similar to the installation wizard, will ask you questions regarding your cluster before finishing with the removal. + +Like the installation wizard, you can pass the additional options in advance as command line parameters (see `--help` output): +```shell +codefresh runner delete --help +``` + + + +## Runner architecture overview + +{% include image.html + lightbox="true" + file="/images/administration/runner/codefresh_runner.png" + url="/images/administration/runner/codefresh_runner.png" + alt="Codefresh Runner architecture overview" + caption="Codefresh Runner architecture overview" + max-width="100%" + %} + + +1. 
[Runtime-Environment specification]({{site.baseurl}}/docs/administration/codefresh-runner/) defines engine and dind pods spec and PVC parameters. +2. Runner pod (Agent) pulls tasks (Builds) from Codefresh API every 3 seconds. +3. Once the agent receives build task (either Manual run build or Webhook triggered build) it calls k8s API to create engine/dind pods and PVC object. +4. Volume Provisioner listens for PVC events (create) and based on StorageClass definition it creates PV object with the corresponding underlying volume backend (ebs/gcedisk/local). +5. During the build, each step (clone/build/push/freestyle/composition) is represented as docker container inside dind (docker-in-docker) pod. Shared Volume (`/codefresh/volume`) is represented as docker volume and mounted to every step (docker containers). PV mount point inside dind pod is `/var/lib/docker`. +6. Engine pod controls dind pod. It deserializes pipeline yaml to docker API calls, terminates dind after build has been finished or per user request (sigterm). +7. `dind-lv-monitor` DaemonSet OR `dind-volume-cleanup` CronJob are part of [Runtime Cleaner]({{site.baseurl}}/docs/administration/codefresh-runner/#runtime-cleaners), `app-proxy` Deployment and Ingress are described in the [next section]({{site.baseurl}}/docs/administration/codefresh-runner/#app-proxy-installation), `monitor` Deployment is for [Kubernetes Dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/). + +## App Proxy installation + +The App Proxy is an **optional** component of the runner that is mainly used when the git provider server is installed on-premises behind the firewall. The App Proxy provides the following features once installed: + +* Enables you to automatically create webhooks for Git in the Codefresh UI (same as the SAAS experience) +* Sends commit status information back to your Git provider (same as the SAAS experience) +* Makes all Git Operations in the GUI work exactly like the SAAS installation of Codefresh + +The requirements for the App proxy is a Kubernetes cluster that: + +1. has already the Codefresh runner installed +1. has an active [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/) +1. allows incoming connections from the VPC/VPN where users are browsing the Codefresh UI. The ingress connection **must** have a hostname assigned for this route and **must** be configured to perform SSL termination + +>Currently the App-proxy works only for Github (SAAS and on-prem versions), Gitlab (SAAS and on-prem versions) and Bitbucket server. + +Here is the architecture of the app-proxy: + +{% include image.html + lightbox="true" + file="/images/administration/runner/app-proxy-architecture.png" + url="/images/administration/runner/app-proxy-architecture.png" + alt="How App Proxy and the Codefresh runner work together" + caption="How App Proxy and the Codefresh runner work together" + max-width="80%" + %} + +Basically when a Git GET operation takes place, the Codefresh UI will contact the app-proxy (if it is present) and it will route the request to the backing Git provider. The confidential Git information never leaves the firewall premises and the connection between the browser and the ingress is SSL/HTTPS. + +The app-proxy has to work over HTTPS and by default it will use the ingress controller to do its SSL termination. Therefore, the ingress controller will need to be configured to perform SSL termination. 
Check the documentation of your ingress controller (for example [nginx ingress](https://kubernetes.github.io/ingress-nginx/examples/tls-termination/)). This means that the app-proxy does not compromise security in any way. + +To install the app-proxy on a Kubernetes cluster that already has a Codefresh runner use the following command: + +```shell +codefresh install app-proxy --host= +``` + +If you want to install the Codefresh runner and app-proxy in a single command use the following: + +```shell +codefresh runner init --app-proxy --app-proxy-host= +``` + +If you have multiple ingress controllers in the Kubernetes cluster you can use the `--app-proxy-ingress-class` parameter to define which ingress will be used. For additional security you can also define an allowlist for IPs/ranges that are allowed to use the ingress (to further limit the web browsers that can access the Ingress). Check the documentation of your ingress controller for the exact details. + +By default the app-proxy ingress will use the path `hostname/app-proxy`. You can change that default by using the values file in the installation with the flag `--values values.yaml`. + +See the `AppProxy` section in the example [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml#L231-L253). + +```shell +codefresh install app-proxy --values values.yaml +``` + +## Manual Installation of Runner Components + +If you don't want to use the wizard, you can also install the components of the runner yourself. + +The Codefresh runner consists of the following: + +* Runner - responsible for getting tasks from the platform and executing them. One per account. Can handle multiple runtimes +* Runtime - the components that are responsible on runtime for the workflow execution : + * Volume provisioner - (pod’s name prefix dind-volume-provisioner-runner) - responsible for volume provisioning for dind pod + * lv-monitor - (pod’s name prefix dind-lv-monitor-runner) - daemonset - responsible for cleaning volumes + +To install the runner on a single cluster with both the runtime and the agent, execute the following: + +```shell +kubectl create namespace codefresh +codefresh install agent --agent-kube-namespace codefresh --install-runtime +``` + +You can then follow the instructions for [using the runner](#using-the-codefresh-runner). + +### Installing Multiple runtimes with a Single Agent + +It is also possible, for advanced users to install a single agent that can manage multiple runtime environments. + +>NOTE: Please make sure that the cluster where the agent is installed has network access to the other clusters of the runtimes + +```shell +# 1. Create namespace for the agent: +kubectl create namespace codefresh-agent + +# 2. Install the agent on the namespace ( give your agent a unique name as $NAME): +# Note down the token and use it in the second command. +codefresh create agent $NAME +codefresh install agent --token $TOKEN --kube-namespace codefresh-agent +codefresh get agents + +# 3. Create namespace for the first runtime: +kubectl create namespace codefresh-runtime-1 + +# 4. Install the first runtime on the namespace +# 5. the runtime name is printed +codefresh install runtime --runtime-kube-namespace codefresh-runtime-1 + +# 6. Attach the first runtime to agent: +codefresh attach runtime --agent-name $AGENT_NAME --agent-kube-namespace codefresh-agent --runtime-name $RUNTIME_NAME --runtime-kube-namespace codefresh-runtime-1 + +# 7. 
Restart the runner pod in namespace `codefresh-agent` +kubectl delete pods $RUNNER_POD + +# 8. Create namespace for the second runtime +kubectl create namespace codefresh-runtime-2 + +# 9. Install the second runtime on the namespace +codefresh install runtime --runtime-kube-namespace codefresh-runtime-2 + +# 10. Attach the second runtime to agent and restart the Venona pod automatically +codefresh attach runtime --agent-name $AGENT_NAME --agent-kube-namespace codefresh-agent --runtime-name $RUNTIME_NAME --runtime-kube-namespace codefresh-runtime-2 --restart-agent +``` + +## Configuration Options + +You can fine tune the installation of the runner to better match your environment and cloud provider. + +### Installing on AWS + +If you've installed the Codefresh runner on [EKS](https://aws.amazon.com/eks/) or any other custom cluster (e.g. with kops) in Amazon you need to configure it properly to work with EBS volumes in order to gain [caching]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipeline-caching/). + +> This section assumes you already installed the Runner with default options: `codefresh runner init` + +**Prerequisites** + +`dind-volume-provisioner` deployment should have permissions to create/attach/detach/delete/get ebs volumes. + +There are 3 options: +* running `dind-volume-provisioner` pod on the node (node-group) with iam role +* k8s secret with [aws credentials format](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) mounted to ~/.aws/credentials (or `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` env vars passed) to the `dind-volume-provisioner` pod +* using [Aws Identity for Service Account](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) iam role assigned to `volume-provisioner-runner` service account + +Minimal policy for `dind-volume-provisioner`: +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume" + ], + "Resource": "*" + } + ] +} +``` + +Create Storage Class for EBS volumes: +>Choose **one** of the Availability Zones you want to be used for your pipeline builds. Multi AZ configuration is not supported. 
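+
+If you are not sure which zone to pick, listing the zone label on your nodes first can help (standard Kubernetes topology labels, assuming your nodes carry them):
+
+```shell
+kubectl get nodes -L topology.kubernetes.io/zone
+```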
+ +**Storage Class (gp2)** + +```yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: dind-ebs +### Specify name of provisioner +provisioner: codefresh.io/dind-volume-provisioner-runner-<-NAMESPACE-> # <---- rename <-NAMESPACE-> with the runner namespace +volumeBindingMode: Immediate +parameters: + # ebs or ebs-csi + volumeBackend: ebs + # Valid zone + AvailabilityZone: us-central1-a # <---- change it to your AZ + # gp2, gp3 or io1 + VolumeType: gp2 + # in case of io1 you can set iops + # iops: 1000 + # ext4 or xfs (default to xfs, ensure that there is xfstools ) + fsType: xfs +``` +**Storage Class (gp3)** + +```yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: dind-ebs +### Specify name of provisioner +provisioner: codefresh.io/dind-volume-provisioner-runner-<-NAMESPACE-> # <---- rename <-NAMESPACE-> with the runner namespace +volumeBindingMode: Immediate +parameters: + # ebs or ebs-csi + volumeBackend: ebs + # Valid zone + AvailabilityZone: us-central1-a # <---- change it to your AZ + # gp2, gp3 or io1 + VolumeType: gp3 + # ext4 or xfs (default to xfs, ensure that there is xfstools ) + fsType: xfs + # I/O operations per second. Only effetive when gp3 volume type is specified. + # Default value - 3000. + # Max - 16,000 + iops: "5000" + # Throughput in MiB/s. Only effective when gp3 volume type is specified. + # Default value - 125. + # Max - 1000. + throughput: "500" +``` + +Apply storage class manifest: +```shell +kubectl apply -f dind-ebs.yaml +``` + +Change your [runtime environment]({{site.baseurl}}/docs/administration/codefresh-runner/#full-runtime-environment-specification) configuration: + +The same AZ you selected before should be used in nodeSelector inside Runtime Configuration: + +To get a list of all available runtimes execute: + +```shell +codefresh get runtime-environments +``` + +Choose the runtime you have just added and get its yaml representation: + +```shell +codefresh get runtime-environments my-eks-cluster/codefresh -o yaml > runtime.yaml +``` + + Under `dockerDaemonScheduler.cluster` block add the nodeSelector `topology.kubernetes.io/zone: `. It should be at the same level as `clusterProvider` and `namespace`. Also, the `pvcs.dind` block should be modified to use the Storage Class you created above (`dind-ebs`). + +`runtime.yaml` example: + +```yaml +version: 1 +metadata: + ... +runtimeScheduler: + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + namespace: codefresh + serviceAccount: codefresh-engine + annotations: {} +dockerDaemonScheduler: + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + namespace: codefresh + nodeSelector: + topology.kubernetes.io/zone: us-central1-a + serviceAccount: codefresh-engine + annotations: {} + userAccess: true + defaultDindResources: + requests: '' + pvcs: + dind: + volumeSize: 30Gi + storageClassName: dind-ebs + reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName' +extends: + - system/default/hybrid/k8s_low_limits +description: '...' 
+accountId: 5f048d85eb107d52b16c53ea +``` + +Update your runtime environment with the [patch command](https://codefresh-io.github.io/cli/operate-on-resources/patch/): + +```shell +codefresh patch runtime-environment my-eks-cluster/codefresh -f runtime.yaml +``` + +If necessary, delete all existing PV and PVC objects left from default local provisioner: +``` +kubectl delete pvc -l codefresh-app=dind -n +kubectl delete pv -l codefresh-app=dind -n +``` + +>You can define all these options above for clean Runner installation with [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml) file: + +`values-ebs.yaml` example: + +```yaml +### Storage parameter example for aws ebs disks +Storage: + Backend: ebs + AvailabilityZone: us-east-1d + VolumeType: gp3 + #AwsAccessKeyId: ABCDF + #AwsSecretAccessKey: ZYXWV + Encrypted: # encrypt volume, default is false + VolumeProvisioner: + ServiceAccount: + Annotations: + eks.amazonaws.com/role-arn: arn:aws:iam:::role/ +NodeSelector: topology.kubernetes.io/zone=us-east-1d +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + topology.kubernetes.io/zone: us-east-1d +``` + +```shell +codefresh runner init --values values-ebs.yaml --exec-demo-pipeline false --skip-cluster-integration true +``` + +### Installing to EKS with Autoscaling + +#### Step 1- EKS Cluster Creation + +See below is a content of cluster.yaml file. We define separate node pools for dind, engine and other services(like runner, cluster-autoscaler etc). + +Before creating the cluster we have created two separate IAM policies: + +* one for our volume-provisioner controller(policy/runner-ebs) that should create and delete volumes +* one for dind pods(policy/dind-ebs) that should be able to attach/detach those volumes to the appropriate nodes using [iam attachPolicyARNs options](https://eksctl.io/usage/iam-policies/). 
+ +`policy/dind-ebs:` + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DetachVolume", + "ec2:AttachVolume" + ], + "Resource": [ + "*" + ] + } + ] +} +``` + +`policy/runner-ebs:` + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume" + ], + "Resource": "*" + } + ] +} +``` + +`my-eks-cluster.yaml` + +```yaml +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: my-eks + region: us-west-2 + version: "1.15" + +nodeGroups: + - name: dind + instanceType: m5.2xlarge + desiredCapacity: 1 + iam: + attachPolicyARNs: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess + - arn:aws:iam::XXXXXXXXXXXX:policy/dind-ebs + withAddonPolicies: + autoScaler: true + ssh: # import public key from file + publicKeyPath: ~/.ssh/id_rsa.pub + minSize: 1 + maxSize: 50 + volumeSize: 50 + volumeType: gp2 + ebsOptimized: true + availabilityZones: ["us-west-2a"] + kubeletExtraConfig: + enableControllerAttachDetach: false + labels: + node-type: dind + taints: + codefresh.io: "dinds:NoSchedule" + + - name: engine + instanceType: m5.large + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + minSize: 1 + maxSize: 10 + volumeSize: 50 + volumeType: gp2 + availabilityZones: ["us-west-2a"] + labels: + node-type: engine + taints: + codefresh.io: "engine:NoSchedule" + + - name: addons + instanceType: m5.2xlarge + desiredCapacity: 1 + ssh: # import public key from file + publicKeyPath: ~/.ssh/id_rsa.pub + minSize: 1 + maxSize: 10 + volumeSize: 50 + volumeType: gp2 + ebsOptimized: true + availabilityZones: ["us-west-2a"] + labels: + node-type: addons + iam: + attachPolicyARNs: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess + - arn:aws:iam::XXXXXXXXXXXX:policy/runner-ebs + withAddonPolicies: + autoScaler: true +availabilityZones: ["us-west-2a", "us-west-2b", "us-west-2c"] +``` + +Execute: + +```shell +eksctl create cluster -f my-eks-cluster.yaml +``` + +The config above will leverage [Amazon Linux 2](https://aws.amazon.com/amazon-linux-2/) as the default operating system for the nodes in the nodegroup. To leverage [Bottlerocket-based nodes](https://aws.amazon.com/bottlerocket/), specify the AMI Family using `amiFamily: Bottlerocket` and add the following additional IAM Policies: `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` and `arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore`. + +>Bottlerocket is an open source Linux based Operating System specifically built to run containers. It focuses on security, simplicity and easy updates via transactions. Find more information in the [official repository](https://github.com/bottlerocket-os/bottlerocket). 
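+
+For reference, a Bottlerocket-based variation of the `dind` node group could look roughly like the sketch below. Only the `amiFamily` field and the two extra managed policies differ from the node group above; treat it as an illustration rather than a drop-in config:
+
+```yaml
+  - name: dind-bottlerocket
+    amiFamily: Bottlerocket
+    instanceType: m5.2xlarge
+    desiredCapacity: 1
+    iam:
+      attachPolicyARNs:
+        - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy
+        - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy
+        - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess
+        - arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly
+        - arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore
+        - arn:aws:iam::XXXXXXXXXXXX:policy/dind-ebs
+      withAddonPolicies:
+        autoScaler: true
+    minSize: 1
+    maxSize: 50
+    availabilityZones: ["us-west-2a"]
+    labels:
+      node-type: dind
+    taints:
+      codefresh.io: "dinds:NoSchedule"
+```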
+ +#### Step 2 - Autoscaler + +Once the cluster is up and running we need to install the [cluster autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html): + +We used iam AddonPolicies `"autoScaler: true"` in the cluster.yaml file so there is no need to create a separate IAM policy or add Auto Scaling group tags, everything is done automatically. + +Deploy the Cluster Autoscaler: + +```shell +kubectl apply -f https://raw.githubusercontent.com/kubernetes/autoscaler/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml +``` + +Add the `cluster-autoscaler.kubernetes.io/safe-to-evict` annotation + +```shell +kubectl -n kube-system annotate deployment.apps/cluster-autoscaler cluster-autoscaler.kubernetes.io/safe-to-evict="false" +``` + +Edit the cluster-autoscaler container command to replace `` with *my-eks*(name of the cluster from cluster.yaml file), and add the following options: + `--balance-similar-node-groups` and `--skip-nodes-with-system-pods=false` + +```shell +kubectl -n kube-system edit deployment.apps/cluster-autoscaler +``` + +```yaml +spec: + containers: + - command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=aws + - --skip-nodes-with-local-storage=false + - --expander=least-waste + - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/my-eks + - --balance-similar-node-groups + - --skip-nodes-with-system-pods=false +``` + +We created our EKS cluster with 1.15 version so the appropriate cluster autoscaler version from [https://github.com/kubernetes/autoscaler/releases](https://github.com/kubernetes/autoscaler/releases) is 1.15.6 + +```shell +kubectl -n kube-system set image deployment.apps/cluster-autoscaler cluster-autoscaler=us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.15.6 +``` + +Check your own version to make sure that the autoscaler version is appropriate. + +#### Step 3 - Optional: We also advise to configure overprovisioning with Cluster Autoscaler + +See details at the [FAQ]( +https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
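+
+In essence, the FAQ recipe reserves headroom with a negative-priority `PriorityClass` and a deployment of pause pods that are evicted (triggering a scale-up) whenever real build pods need the space. A minimal sketch, with placeholder replica count and resource requests that you would size to your typical dind/engine pods:
+
+```yaml
+apiVersion: scheduling.k8s.io/v1
+kind: PriorityClass
+metadata:
+  name: overprovisioning
+value: -10
+globalDefault: false
+description: "Priority class for overprovisioning placeholder pods"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: overprovisioning
+  namespace: kube-system
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      run: overprovisioning
+  template:
+    metadata:
+      labels:
+        run: overprovisioning
+    spec:
+      priorityClassName: overprovisioning
+      containers:
+        - name: reserve-resources
+          image: registry.k8s.io/pause:3.9
+          resources:
+            requests:
+              cpu: "1"
+              memory: 2Gi
+```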
+
+#### Step 4 - Adding an EKS cluster as a runner to the Codefresh platform with EBS support
+
+Make sure that you are targeting the correct cluster:
+
+```shell
+$ kubectl config current-context
+my-aws-runner
+```
+
+Install the runner, passing additional options:
+
+```shell
+codefresh runner init \
+--name my-aws-runner \
+--kube-node-selector=topology.kubernetes.io/zone=us-west-2a \
+--build-node-selector=topology.kubernetes.io/zone=us-west-2a \
+--kube-namespace cf --kube-context-name my-aws-runner \
+--set-value Storage.VolumeProvisioner.NodeSelector=node-type=addons \
+--set-value=Storage.Backend=ebs \
+--set-value=Storage.AvailabilityZone=us-west-2a
+```
+
+* You should specify the zone in which you want your volumes to be created, for example: `--set-value=Storage.AvailabilityZone=us-west-2a`
+* (Optional) If you want to assign the volume-provisioner to a specific node, for example a node group that has an IAM role allowing it to create EBS volumes, use: `--set-value Storage.VolumeProvisioner.NodeSelector=node-type=addons`
+
+If you want to use [encrypted EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_key_mgmt) (they are unencrypted by default), add the custom value `--set-value=Storage.Encrypted=true`.
+If you already have a key, add its ARN via `--set-value=Storage.KmsKeyId= value`; otherwise a key is generated by AWS. Here is the full command:
+
+```shell
+codefresh runner init \
+--name my-aws-runner \
+--kube-node-selector=topology.kubernetes.io/zone=us-west-2a \
+--build-node-selector=topology.kubernetes.io/zone=us-west-2a \
+--kube-namespace cf --kube-context-name my-aws-runner \
+--set-value Storage.VolumeProvisioner.NodeSelector=node-type=addons \
+--set-value=Storage.Backend=ebs \
+--set-value=Storage.AvailabilityZone=us-west-2a \
+--set-value=Storage.Encrypted=[false|true] \
+--set-value=Storage.KmsKeyId=
+```
+
+For an explanation of all other options run `codefresh runner init --help` ([global parameter table](#customizing-the-wizard-installation)).
+
+At this point the quick start wizard will start the installation.
+ +Once that is done we need to modify the runtime environment of `my-aws-runner` to specify the necessary toleration, nodeSelector and disk size: + +```shell +codefresh get re --limit=100 my-aws-runner/cf -o yaml > my-runtime.yml +``` + +Modify the file my-runtime.yml as shown below: + +```yaml +version: null +metadata: + agent: true + trial: + endingAt: 1593596844167 + reason: Codefresh hybrid runtime + started: 1592387244207 + name: my-aws-runner/cf + changedBy: ivan-codefresh + creationTime: '2020/06/17 09:47:24' +runtimeScheduler: + cluster: + clusterProvider: + accountId: 5cb563d0506083262ba1f327 + selector: my-aws-runner + namespace: cf + nodeSelector: + node-type: engine + tolerations: + - effect: NoSchedule + key: codefresh.io + operator: Equal + value: engine + annotations: {} +dockerDaemonScheduler: + cluster: + clusterProvider: + accountId: 5cb563d0506083262ba1f327 + selector: my-aws-runner + namespace: cf + nodeSelector: + node-type: dind + annotations: {} + defaultDindResources: + requests: '' + tolerations: + - effect: NoSchedule + key: codefresh.io + operator: Equal + value: dinds + pvcs: + dind: + volumeSize: 30Gi + reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName' + storageClassName: dind-local-volumes-runner-cf + userAccess: true +extends: + - system/default/hybrid/k8s_low_limits +description: 'Runtime environment configure to cluster: my-aws-runner and namespace: cf' +accountId: 5cb563d0506083262ba1f327 +``` + +Apply changes. + +```shell +codefresh patch re my-aws-runner/cf -f my-runtime.yml +``` + +That's all. Now you can go to UI and try to run a pipeline on RE my-aws-runner/cf + +### Injecting AWS arn roles into the cluster + +**Step 1** - Make sure the OIDC provider is connected to the cluster + +See: + +* [https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) +* [https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) + +**Step 2** - Create IAM role and policy as explained in [https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html) + +Here, in addition to the policy explained, you need a Trust Relationship established between this role and the OIDC entity. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::${ACCOUNT_ID}:oidc-provider/${OIDC_PROVIDER}" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "${OIDC_PROVIDER}:sub": "system:serviceaccount:${CODEFRESH_NAMESPACE}:codefresh-engine" + } + } + } + ] +} +``` + +**Step 3** - Annotate the `codefresh-engine` Kubernetes Service Account in the namespace where the Codefresh Runner is installed with the proper IAM role. + +```shell +kubectl annotate -n ${CODEFRESH_NAMESPACE} sa codefresh-engine eks.amazonaws.com/role-arn=${ROLE_ARN} +``` + +Once the annotation is added, you should see it when you describe the Service Account. 
+ +```shell +kubectl describe -n ${CODEFRESH_NAMESPACE} sa codefresh-engine + +Name: codefresh-engine +Namespace: codefresh +Labels: app=app-proxy + version=1.6.8 +Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/Codefresh +Image pull secrets: +Mountable secrets: codefresh-engine-token-msj8d +Tokens: codefresh-engine-token-msj8d +Events: +``` + +**Step 4** - Using the AWS assumed role identity + +After annotating the Service Account, run a pipeline to test the AWS resource access: + +```yaml +RunAwsCli: + title : Communication with AWS + image : mesosphere/aws-cli + stage: "build" + commands : + - apk update + - apk add jq + - env + - cat /codefresh/volume/sensitive/.kube/web_id_token + - aws sts assume-role-with-web-identity --role-arn $AWS_ROLE_ARN --role-session-name mh9test --web-identity-token file://$AWS_WEB_IDENTITY_TOKEN_FILE --duration-seconds 1000 > /tmp/irp-cred.txt + - export AWS_ACCESS_KEY_ID="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.AccessKeyId")" + - export AWS_SECRET_ACCESS_KEY="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.SecretAccessKey")" + - export AWS_SESSION_TOKEN="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.SessionToken")" + - rm /tmp/irp-cred.txt + - aws s3api get-object --bucket jags-cf-eks-pod-secrets-bucket --key eks-pod2019-12-10-21-18-32-560931EEF8561BC4 getObjectNotWorks.txt +``` + +### Installing behind a proxy + +If you want to deploy the Codefresh runner on a Kubernetes cluster that doesn’t have direct access to `g.codefresh.io`, and has to go trough a proxy server to access `g.codefresh.io`, you will need to follow these additional steps: + +**Step 1** - Follow the installation instructions of the previous section + +**Step 2** - Run `kubectl edit deployment runner -n codefresh-runtime` and add the proxy variables like this + +```yaml +spec: + containers: + - env: + - name: HTTP_PROXY + value: http://:port + - name: HTTPS_PROXY + value: http://:port + - name: http_proxy + value: http://:port + - name: https_proxy + value: http://:port + - name: no_proxy + value: localhost,127.0.0.1, + - name: NO_PROXY + value: localhost,127.0.0.1, +``` + +**Step 3** - Add the following variables to your runtime.yaml, both under the `runtimeScheduler:` and under `dockerDaemonScheduler:` blocks inside the `envVars:` section + +```yaml +HTTP_PROXY: http://:port +http_proxy: http://:port +HTTPS_PROXY: http://:port +https_proxy: http://:port +No_proxy: localhost, 127.0.0.1, +NO_PROXY: localhost, 127.0.0.1, +``` + +**Step 4** - Add `.firebaseio.com` to the allowed-sites of the proxy server + +**Step 5** - Exec into the `dind` pod and run `ifconfig` + +If the MTU value for `docker0` is higher than the MTU value of `eth0` (sometimes the `docker0` MTU is 1500, while `eth0` MTU is 1440) - you need to change this, the `docker0` MTU should be lower than `eth0` MTU + +To fix this, edit the configmap in the codefresh-runtime namespace: + +```shell +kubectl edit cm codefresh-dind-config -n codefresh-runtime +``` + +And add this after one of the commas: +`\"mtu\":1440,` + +### Installing on Rancher RKE 2.X + +#### Step 1 - Configure the kubelet to work with the runner's StorageClass + +The runner's default StorageClass creates the persistent cache volume from local storage on each node. We need to edit the cluster config to allow this. + +In the Rancher UI (v2.5.9 and earlier), drill into the target cluster and then click the Edit Cluster button at the top-right. 
+{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-cluster.png" + url="/images/administration/runner/rancher-cluster.png" + alt="Drill into your cluster and click Edit Cluster on the right" + caption="Drill into your cluster and click Edit Cluster on the right" + max-width="100%" + %} + +In Rancher v2.6+ with the updated UI, open the Cluster Management in the left panel, then click the three-dot menu near the corresponding cluster and select 'Edit Config'. +{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-cluster-2.png" + url="/images/administration/runner/rancher-cluster-2.png" + alt="Click Edit Cluster on the right in your cluster list" + caption="Click Edit Cluster on the right in your cluster list" + max-width="100%" + %} + +On the edit cluster page, scroll down to the Cluster Options section and click its **Edit as YAML** button +{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-edit-as-yaml.png" + url="/images/administration/runner/rancher-edit-as-yaml.png" + alt="Cluster Options -> Edit as YAML" + caption="Cluster Options -> Edit as YAML" + max-width="100%" + %} +Edit the YAML to include an extra mount in the kubelet service: + +```yaml +rancher_kubernetes_engine_config: + ... + services: + ... + kubelet: + extra_binds: + - '/var/lib/codefresh:/var/lib/codefresh:rshared' +``` + +{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-kublet.png" + url="/images/administration/runner/rancher-kublet.png" + alt="Add volume to rancher_kubernetes_engine_config.services.kublet.extra_binds" + caption="Add volume to rancher_kubernetes_engine_config.services.kublet.extra_binds" + max-width="100%" + %} + +#### Step 2 - Make sure your kubeconfig user is a ClusterAdmin + +The user in your kubeconfig must be a cluster admin in order to install the runner. If you plan to have your pipelines connect to this cluster as a cluster admin, then you can go ahead and create a Codefresh user for this purpose in the Rancher UI with a **non-expiring** kubeconfig token. This is the easiest way to do the installation. + +However, if you want your pipelines to connect to this cluster with less privileges, then you can use your personal user account with Cluster Admin privileges for the installation, and then we'll create a Codefresh account with lesser privileges later (in Step 5). In that case, you can now move on to Step 3. 
+
+Follow these steps to create a Codefresh user with Cluster Admin rights, from the Rancher UI:
+
+* Click Security at the top, and then choose Users
+  {% include image.html lightbox="true" file="/images/administration/runner/rancher-security.png" url="/images/administration/runner/rancher-security.png" alt="Create a cluster admin user for Codefresh" caption="Create a cluster admin user for Codefresh" max-width="100%" %}
+* Click the Add User button, and under Global Permissions check the box for **Restricted Administrator**
+* Log out of the Rancher UI, and then log back in as the new user
+* Click your user icon at the top-right, and then choose **API & Keys**
+* Click the **Add Key** button and create a kubeconfig token with Expires set to Never
+* Copy the Bearer Token field (combines Access Key and Secret Key)
+* Edit your kubeconfig and put the Bearer Token you copied in the `token` field of your user
+
+#### Step 3 - Install the Runner
+
+If you've created your kubeconfig from the Rancher UI, then it will contain an API endpoint that is not reachable internally from within the cluster. To work around this, we need to tell the runner to instead use Kubernetes' generic internal API endpoint. Also, if you didn't create a Codefresh user in step 2 and your kubeconfig contains your personal user account, then you should also add the `--skip-cluster-integration` option.
+
+Install the runner with a Codefresh user (ClusterAdmin, non-expiring token):
+
+```shell
+codefresh runner init \
+  --set-value KubernetesHost=https://kubernetes.default.svc.cluster.local
+```
+
+Or install the runner with your personal user account:
+
+```shell
+codefresh runner init \
+  --set-value KubernetesHost=https://kubernetes.default.svc.cluster.local \
+  --skip-cluster-integration
+```
+
+The wizard will then ask you some basic questions.
+
+#### Step 4 - Update the runner's Docker MTU
+
+By default, RKE nodes use the [Canal CNI](https://rancher.com/docs/rancher/v2.x/en/faq/networking/cni-providers/#canal), which combines elements of Flannel and Calico, and uses VXLAN encapsulation. This VXLAN encapsulation has a 50-byte overhead, thus reducing the MTU of its virtual interfaces from the standard 1500 to 1450. For example, when running `ifconfig` on an RKE 2.5.5 node, you might see several interfaces like this. Note the `MTU:1450`.
+
+```shell
+cali0f8ac592086 Link encap:Ethernet HWaddr ee:ee:ee:ee:ee:ee
+          inet6 addr: fe80::ecee:eeff:feee:eeee/64 Scope:Link
+          UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1
+          RX packets:11106 errors:0 dropped:0 overruns:0 frame:0
+          TX packets:10908 errors:0 dropped:0 overruns:0 carrier:0
+          collisions:0 txqueuelen:0
+          RX bytes:922373 (922.3 KB) TX bytes:9825590 (9.8 MB)
+```
+
+We must reduce the Docker MTU used by the runner's Docker in Docker (dind) pods to fit within this lower MTU. This is stored in a configmap in the namespace where the runner is installed. Assuming that you installed the runner into the `codefresh` namespace, you would edit the configmap like this:
+
+```shell
+kubectl edit cm codefresh-dind-config -n codefresh
+```
+
+In the editor, update the **daemon.json** field - add `,\"mtu\":1440` just before the last curly brace.
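+
+After the edit, the tail of the `daemon.json` value would look roughly like this (a sketch; everything before the shown fragment stays as it was):
+
+```yaml
+data:
+  daemon.json: "{\n ... \"metrics-addr\" : \"0.0.0.0:9323\",\n \"experimental\" : true,\"mtu\":1440\n}\n"
+```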
+ {% include image.html + lightbox="true" + file="/images/administration/runner/rancher-mtu.png" + url="/images/administration/runner/rancher-mtu.png" + alt="Update the runner's Docker MTU" + caption="Update the runner's Docker MTU" + max-width="100%" + %} + +#### Step 5 - Create the Cluster Integration + +If you created a user in Step 2 and used it to install the runner in Step 3, then you can skip this step - your installation is complete! + +However, if you installed the runner with the `--skip-cluster-integration` option then you should follow the documentaion to [Add a Rancher Cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/#adding-a-rancher-cluster) to your Kubernetes Integrations. + +Once complete, you can go to the Codefresh UI and run a pipeline on the new runtime, including steps that deploy to the Kubernetes Integration. + +#### Troubleshooting TLS Errors + +Depending on your Rancher configuration, you may need to allow insecure HTTPS/TLS connections. You can do this by adding an environment variable to the runner deployment. + +Assuming that you installed the runner into the `codefresh` namespace, you would edit the runner deployment like this: + +```shell +kubectl edit deploy runner -n codefresh +``` + +In the editor, add this environment variable under spec.containers.env[]: + +```yaml +- name: NODE_TLS_REJECT_UNAUTHORIZED + value: "0" +``` + +### Installing on Google Kubernetes Engine + +If you are installing Codefresh runner on the Kubernetes cluster on [GKE](https://cloud.google.com/kubernetes-engine/) + +* make sure your user has `Kubernetes Engine Cluster Admin` role in google console and +* bind your user with `cluster-admin` Kubernetes cluster role. + +```shell +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user $(gcloud config get-value account) +``` + + +#### Storage options on GKE + +**Local SSD** + +If you want to use *LocalSSD* in GKE: + +*Prerequisites:* [GKE cluster with local SSD](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/local-ssd) + +Install Runner with the Wizard: + +```shell +codefresh runner init [options] --set-value=Storage.LocalVolumeParentDir=/mnt/disks/ssd0/codefresh-volumes \ + --build-node-selector=cloud.google.com/gke-local-ssd=true +``` + +Or with `values-example.yaml` values file: + +```yaml +... +### Storage parameters example for gke-local-ssd + Storage: + Backend: local + LocalVolumeParentDir: /mnt/disks/ssd0/codefresh-volumes + NodeSelector: cloud.google.com/gke-local-ssd=true +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + cloud.google.com/gke-local-ssd: "true" +... 
+``` +```shell +codefresh runner init [options] --values values-example.yaml +``` + +To configure existing Runner with Local SSDs follow this article: + +[How-to: Configuring an existing Runtime Environment with Local SSDs (GKE only)](https://support.codefresh.io/hc/en-us/articles/360016652920-How-to-Configuring-an-existing-Runtime-Environment-with-Local-SSDs-GKE-only-) + + +**GCE Disks** + +If you want to use *GCE Disks*: + +*Prerequisites:* volume provisioner (dind-volume-provisioner) should have permissions to create/delete/get GCE disks + +There are 3 options to provide cloud credentials: + +* run `dind-volume-provisioner-runner` pod on a node with IAM role which is allowed to create/delete/get GCE disks +* create Google Service Account with `ComputeEngine.StorageAdmin` role, download its key in JSON format and pass it to `codefresh runner init` with `--set-file=Storage.GooogleServiceAccount=/path/to/google-service-account.json` +* use [Google Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) to assign IAM role to `volume-provisioner-runner` service account + +Notice that builds will be running in a single availability zone, so you must specify AvailabilityZone parameters. + + +##### Runner installation with GCE Disks (Google SA JSON key) + +Using the Wizard: + +```shell +codefresh runner init [options] \ + --set-value=Storage.Backend=gcedisk \ + --set-value=Storage.AvailabilityZone=us-central1-c \ + --kube-node-selector=topology.kubernetes.io/zone=us-central1-c \ + --build-node-selector=topology.kubernetes.io/zone=us-central1-c \ + --set-file=Storage.GoogleServiceAccount=/path/to/google-service-account.json +``` + +Using the values `values-example.yaml` file: +```yaml +... +### Storage parameter example for GCE disks + Storage: + Backend: gcedisk + AvailabilityZone: us-central1-c + GoogleServiceAccount: > #serviceAccount.json content + { + "type": "service_account", + "project_id": "...", + "private_key_id": "...", + "private_key": "...", + "client_email": "...", + "client_id": "...", + "auth_uri": "...", + "token_uri": "...", + "auth_provider_x509_cert_url": "...", + "client_x509_cert_url": "..." + } + NodeSelector: topology.kubernetes.io/zone=us-central1-c +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + topology.kubernetes.io/zone: us-central1-c +... +``` +```shell +codefresh runner init [options] --values values-example.yaml +``` + + +##### Runner installation with GCE Disks (Workload Identity with IAM role) + +Using the values `values-example.yaml` file: + +```yaml +... +### Storage parameter example for GCE disks + Storage: + Backend: gcedisk + AvailabilityZone: us-central1-c + VolumeProvisioner: + ServiceAccount: + Annotations: #annotation to the volume-provisioner service account, using the email address of the Google service account + iam.gke.io/gcp-service-account: @.iam.gserviceaccount.com + NodeSelector: topology.kubernetes.io/zone=us-central1-c +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + topology.kubernetes.io/zone: us-central1-c +... 
+``` +```shell +codefresh runner init [options] --values values-example.yaml +``` + +Create the binding between Kubernetes service account and Google service account: + +```shell +export K8S_NAMESPACE=codefresh +export KSA_NAME=volume-provisioner-runner +export GSA_NAME= +export PROJECT_ID= + +gcloud iam service-accounts add-iam-policy-binding \ + --role roles/iam.workloadIdentityUser \ + --member "serviceAccount:${PROJECT_ID}.svc.id.goog[${K8S_NAMESPACE}/${KSA_NAME}]" \ + ${GSA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com +``` + +To configure existing Runner with GCE Disks follow this article: + +[How-to: Configuring an existing Runtime Environment with GCE disks](https://support.codefresh.io/hc/en-us/articles/360016652900-How-to-Configuring-an-existing-Runtime-Environment-with-GCE-disks) + + +##### Using multiple Availability Zones + +Currently, to support effective caching with GCE disks, the builds/pods need to be scheduled in a single AZ (this is more related to a GCP limitation than a Codefresh runner issue). + +If you have Kubernetes nodes running in multiple Availability Zones and wish to use the Codefresh runner we suggest the following: + +**Option A** - Provision a new Kubernetes cluster: a cluster that runs in a single AZ only. - The cluster should be dedicated for usage with the Codefresh runner. This is the preferred solution and avoids extra complexity. + +**Option B** - Install Codefresh runner in your multi-zone cluster, and let it run in the default Node Pool: - in this case, you must specify `--build-node-selector=` (e.g.: `--build-node-selector=topology.kubernetes.io/zone=us-central1-c`) or simply modify the Runtime environment as below: + +```shell +codefresh get re $RUNTIME_NAME -o yaml > re.yaml +``` + +Edit the yaml: + +```yaml +version: 2 +metadata: + ... +runtimeScheduler: + cluster: + nodeSelector: #schedule engine pod onto a node whose labels match the nodeSelector + topology.kubernetes.io/zone: us-central1-c + ... +dockerDaemonScheduler: + cluster: + nodeSelector: #schedule dind pod onto a node whose labels match the nodeSelector + topology.kubernetes.io/zone: us-central1-c + ... + pvcs: + dind: + ... +``` + +Apply changes with: + +```shell +codefresh patch re -f re.yaml +``` + +**Option C** - Like option B, but with a dedicated Node Pool + +**Option D** - Have 2 separate Codefresh runner Runtimes, one for zone A, and the other for zone B, and so on: this technically works, but it will require you to manually set the RE to use for the pipelines that won't use the default Codefresh runner RE. To distribute the pipeline's builds across the Codefresh runner REs. + +For example, let's say Venona-zoneA is the default RE, then, that means that for the pipelines that you want to run in Venona-zoneB, then you'll need to modify their RE settings, and explicitly set Venona-zoneB as the one to use. + +Regarding [Regional Persistent Disks](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/regional-pd), their support is not currently implemented in the Codefresh runner. + + +### Installing on AKS + +**Azure Disks** + +*Prerequisite:* volume provisioner (`dind-volume-provisioner`) should have permissions to create/delete/get Azure Disks + +Minimal IAM Role for dind-volume-provisioner:
        +`dind-volume-provisioner-role.json` +```json +{ + "Name": "CodefreshDindVolumeProvisioner", + "Description": "Perform create/delete/get disks", + "IsCustom": true, + "Actions": [ + "Microsoft.Compute/disks/read", + "Microsoft.Compute/disks/write", + "Microsoft.Compute/disks/delete" + + ], + "AssignableScopes": ["/subscriptions/"] +} +``` + +If you use AKS with managed [identities for node group](https://docs.microsoft.com/en-us/azure/aks/use-managed-identity), you can run the script below to assign `CodefreshDindVolumeProvisioner` role to aks node identity: + +```shell +export ROLE_DEFINITIN_FILE=dind-volume-provisioner-role.json +export SUBSCRIPTION_ID=$(az account show --query "id" | xargs echo ) +export RESOURCE_GROUP=codefresh-rt1 +export AKS_NAME=codefresh-rt1 +export LOCATION=$(az aks show -g $RESOURCE_GROUP -n $AKS_NAME --query location | xargs echo) +export NODES_RESOURCE_GROUP=MC_${RESOURCE_GROUP}_${AKS_NAME}_${LOCATION} +export NODE_SERVICE_PRINCIPAL=$(az aks show -g $RESOURCE_GROUP -n $AKS_NAME --query identityProfile.kubeletidentity.objectId | xargs echo) + +az role definition create --role-definition @${ROLE_DEFINITIN_FILE} +az role assignment create --assignee $NODE_SERVICE_PRINCIPAL --scope /subscriptions/$SUBSCRIPTION_ID/resourceGroups/$NODES_RESOURCE_GROUP --role CodefreshDindVolumeProvisioner +``` + +Now install Codefresh Runner with cli wizard: +```shell +codefresh runner init --set-value Storage.Backend=azuredisk --set Storage.VolumeProvisioner.MountAzureJson=true +``` +Or using [values-example.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml): +```yaml +Storage: + Backend: azuredisk + VolumeProvisioner: + MountAzureJson: true +``` +```shell +codefresh runner init --values values-example.yaml +``` +Or with helm chart [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/charts/cf-runtime/values.yaml): +```yaml +storage: + backend: azuredisk + azuredisk: + skuName: Premium_LRS + +volumeProvisioner: + mountAzureJson: true +``` +```shell +helm install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml -f values.yaml --create-namespace --namespace codefresh +``` + + +### Internal Registry Mirror + +You can configure your Codefresh Runner to use an internal registry as a mirror for any container images that are mentioned in your pipelines. + +First setup an internal registry as described in [https://docs.docker.com/registry/recipes/mirror/](https://docs.docker.com/registry/recipes/mirror/). + +Then locate the `codefresh-dind-config` config map in the namespace that houses the runner and edit it. 
+ +```shell +kubectl -n codefresh edit configmap codefresh-dind-config +``` + +Change the `data` field from: + +```yaml +data: + daemon.json: "{\n \"hosts\": [ \"unix:///var/run/docker.sock\",\n \"tcp://0.0.0.0:1300\"],\n + \ \"storage-driver\": \"overlay2\",\n \"tlsverify\": true, \n \"tls\": true,\n + \ \"tlscacert\": \"/etc/ssl/cf-client/ca.pem\",\n \"tlscert\": \"/etc/ssl/cf/server-cert.pem\",\n + \ \"tlskey\": \"/etc/ssl/cf/server-key.pem\",\n \"insecure-registries\" : [\"192.168.99.100:5000\"],\n + \ \"metrics-addr\" : \"0.0.0.0:9323\",\n \"experimental\" : true\n}\n" +``` + +to + +```yaml +data: + daemon.json: "{\n \"hosts\": [ \"unix:///var/run/docker.sock\",\n \"tcp://0.0.0.0:1300\"],\n + \ \"storage-driver\": \"overlay2\",\n \"tlsverify\": true, \n \"tls\": true,\n + \ \"tlscacert\": \"/etc/ssl/cf-client/ca.pem\",\n \"tlscert\": \"/etc/ssl/cf/server-cert.pem\",\n + \ \"tlskey\": \"/etc/ssl/cf/server-key.pem\",\n \"insecure-registries\" : [\"192.168.99.100:5000\"],\n + \ \"registry-mirrors\": [ \"https://\" ], \n + \ \"metrics-addr\" : \"0.0.0.0:9323\",\n \"experimental\" : true\n}\n" +``` + +This adds the line `\ \"registry-mirrors\": [ \"https://\" ], \n` which contains a single registry to use as a mirror. Quit and Save by typing `:wq`. + +Now any container image that is used in your pipeline and isn't fully qualified, will be pulled through the Docker registry that is configured as a mirror. + + +### Installing the monitoring component + +If your cluster is located [behind the firewall](https://codefresh.io/docs/docs/administration/behind-the-firewall/) you might want to use the runner monitoring component to get valuable information about the cluster resources to Codefresh, for example, to [Kubernetes](https://g.codefresh.io/kubernetes/services/) and [Helm Releases](https://g.codefresh.io/helm/releases/releasesNew/) dashboards. + +To install the monitoring component you can use `--install-monitor` flag in the `runner init` command: + +```shell +codefresh runner init --install-monitor +``` + +Please note, that the monitoring component will not be installed if you use `--install-monitor` with `--skip-cluster-integration` flag. In case you want to skip adding the cluster integration during the runner installation, but still want to get the cluster resources to Codefresh dashboards, you can install the monitoring component separately: + +```shell +codefresh install monitor --kube-context-name --kube-namespace --cluster-id --token +``` + + + +## Full runtime environment specification + +The following section contains an explanation of runtime environment specification and possible options to modify it. Notice that there are additional and hidden fields that are autogenerated by Codefresh that complete a full runtime spec. 
You can't directly see or edit them (unless you run your own [Codefresh On-Premises Installation]({{site.baseurl}}/docs/administration/codefresh-on-prem/)).
+
+
+To get a list of all available runtimes, execute:
+```shell
+codefresh get runtime-environments
+#or
+codefresh get re
+```
+
+Choose the runtime that you want to inspect or modify, and get its YAML/JSON representation:
+```shell
+codefresh get re my-eks-cluster/codefresh -o yaml > runtime.yaml
+#or
+codefresh get re my-eks-cluster/codefresh -o json > runtime.json
+```
+
+Update your runtime environment with the [patch command](https://codefresh-io.github.io/cli/operate-on-resources/patch/):
+```shell
+codefresh patch re my-eks-cluster/codefresh -f runtime.yaml
+```
+
+Below is an example of the default, basic runtime spec after you've installed the Runner:
+
+{% highlight yaml %}
+{% raw %}
+version: 1
+metadata:
+  ...
+runtimeScheduler:
+  cluster:
+    clusterProvider:
+      accountId: 5f048d85eb107d52b16c53ea
+      selector: my-eks-cluster
+    namespace: codefresh
+    serviceAccount: codefresh-engine
+  annotations: {}
+dockerDaemonScheduler:
+  cluster:
+    clusterProvider:
+      accountId: 5f048d85eb107d52b16c53ea
+      selector: my-eks-cluster
+    namespace: codefresh
+    serviceAccount: codefresh-engine
+  annotations: {}
+  userAccess: true
+  defaultDindResources:
+    requests: ''
+  pvcs:
+    dind:
+      storageClassName: dind-local-volumes-runner-codefresh
+extends:
+  - system/default/hybrid/k8s_low_limits
+description: '...'
+accountId: 5f048d85eb107d52b16c53ea
+{% endraw %}
+{% endhighlight %}
+
+### Top level fields
+
+{: .table .table-bordered .table-hover}
+| Field name | Type | Value |
+| -------------- |-------------------------| -------------------------|
+| `version` | string | Runtime environment version |
+| `metadata` | object | Meta-information |
+| `runtimeScheduler` | object | Engine pod definition |
+| `dockerDaemonScheduler` | object | Dind pod definition |
+| `extends` | array | System field (links to full runtime spec from Codefresh API) |
+| `description` | string | Runtime environment description (k8s context name and namespace) |
+| `accountId` | string | Account to which this runtime belongs |
+| `appProxy` | object | Optional field for [app-proxy]({{site.baseurl}}/docs/administration/codefresh-runner/#optional-installation-of-the-app-proxy) |
+
+### runtimeScheduler fields (engine)
+
+{: .table .table-bordered .table-hover}
+| Field name | Type | Value |
+| -------------- |-------------------------| -------------------------|
+| `image` | string | Override default engine image |
+| `imagePullPolicy` | string | Override image pull policy (default `IfNotPresent`) |
+| `type` | string | `KubernetesPod` |
+| `envVars` | object | Override or add environment variables passed into the engine pod |
+| `userEnvVars` | object | Add external env var(s) to the pipeline.
See [Custom Global Environment Variables]({{site.baseurl}}/docs/administration/codefresh-runner/#custom-global-environment-variables) | +| `cluster` | object | k8s related information (`namespace`, `serviceAccount`, `nodeSelector`) | +| `resources` | object | Specify non-default `requests` and `limits` for engine pod | +| `tolerations` | array | Add tolerations to engine pod | +| `annotations` | object | Add custom annotations to engine pod (empty by default `{}`) | +| `labels` | object | Add custom labels to engine pod (empty by default `{}`) | +| `dnsPolicy` | string | Engine pod's [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) | +| `dnsConfig` | object | Engine pod's [DNS config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) | + +`runtimeScheduler` example: +{% highlight yaml %} +{% raw %} +runtimeScheduler: + imagePullPolicy: Always + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + nodeSelector: #schedule engine pod onto a node whose labels match the nodeSelector + node-type: engine + namespace: codefresh + serviceAccount: codefresh-engine + annotations: {} + labels: + spotinst.io/restrict-scale-down: "true" #optional label to prevent node scaling down when the runner is deployed on spot instances using spot.io + envVars: + NODE_TLS_REJECT_UNAUTHORIZED: '0' #disable certificate validation for TLS connections (e.g. to g.codefresh.io) + METRICS_PROMETHEUS_ENABLED: 'true' #enable /metrics on engine pod + DEBUGGER_TIMEOUT: '30' #debug mode timeout duration (in minutes) + userEnvVars: + - name: GITHUB_TOKEN + valueFrom: + secretKeyRef: + name: github-token + key: token + resources: + requests: + cpu: 60m + memory: 500Mi + limits: + cpu: 1000m + memory: 2048Mi + tolerations: + - effect: NoSchedule + key: codefresh.io + operator: Equal + value: engine +{% endraw %} +{% endhighlight %} + +### dockerDaemonScheduler fields (dind) + +| Field name | Type | Value | +| -------------- |-------------------------| -------------------------| +| `dindImage` | string | Override default dind image | +| `type` | string | `DindPodPvc` | +| `envVars` | object | Override or add environment variables passed into the dind pod. See [IN-DIND cleaner]({{site.baseurl}}/docs/administration/codefresh-runner/#cleaners) | +| `userVolumeMounts` with `userVolumes` | object | Add volume mounts to the pipeline See [Custom Volume Mounts]({{site.baseurl}}/docs/administration/codefresh-runner/#custom-volume-mounts) | +| `cluster` | object | k8s related information (`namespace`, `serviceAccount`, `nodeSelector`) | +| `defaultDindResources` | object | Override `requests` and `limits` for dind pod (defaults are `cpu: 400m` and `memory:800Mi` ) | +| `tolerations` | array | Add tolerations to dind pod | +| `annotations` | object | Add custom annotations to dind pod (empty by default `{}`) | +| `labels` | object | Add custom labels to dind pod (empty by default `{}`) | +| `pvc` | object | Override default storage configuration for PersistentVolumeClaim (PVC) with `storageClassName`, `volumeSize`, `reuseVolumeSelector`. 
See [Volume Reusage Policy]({{site.baseurl}}/docs/administration/codefresh-runner/#volume-reusage-policy) |
+| `dnsPolicy` | string | Dind pod's [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) |
+| `dnsConfig` | object | Dind pod's [DNS config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) |
+
+`dockerDaemonScheduler` example:
+{% highlight yaml %}
+{% raw %}
+dockerDaemonScheduler:
+  cluster:
+    clusterProvider:
+      accountId: 5f048d85eb107d52b16c53ea
+      selector: my-eks-cluster
+    nodeSelector: #schedule dind pod onto a node whose labels match the nodeSelector
+      node-type: dind
+    namespace: codefresh
+    serviceAccount: codefresh-engine
+  annotations: {}
+  labels:
+    spotinst.io/restrict-scale-down: "true" #optional label to prevent node scaling down when the runner is deployed on spot instances using spot.io
+  userAccess: true
+  defaultDindResources:
+    requests: ''
+    limits:
+      cpu: 1000m
+      memory: 2048Mi
+  userVolumeMounts:
+    my-cert:
+      name: cert
+      mountPath: /etc/ssl/cert
+      readOnly: true
+  userVolumes:
+    my-cert:
+      name: cert
+      secret:
+        secretName: tls-secret
+  pvcs:
+    dind:
+      storageClassName: dind-local-volumes-runner-codefresh
+      volumeSize: 30Gi
+      reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id'
+  tolerations:
+    - key: codefresh.io
+      operator: Equal
+      value: dinds
+      effect: NoSchedule
+{% endraw %}
+{% endhighlight %}
+
+### Custom Global Environment Variables
+You can add your own environment variables to the runtime environment, so that all pipeline steps have access to them. A typical example would be a shared secret that you want to pass to the pipeline.
+
+Under the `runtimeScheduler` block, you can add an additional element named `userEnvVars`, which follows the same syntax as [secret/environment variables](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables).
+
+`runtime.yaml`
+{% highlight yaml %}
+{% raw %}
+...
+runtimeScheduler:
+  userEnvVars:
+    - name: GITHUB_TOKEN
+      valueFrom:
+        secretKeyRef:
+          name: github-token
+          key: token
+...
+{% endraw %}
+{% endhighlight %}
+
+### Custom Volume Mounts
+You can add your own volume mounts to the runtime environment, so that all pipeline steps have access to the same set of external files. A typical example of this scenario is when you want to make a set of SSL certificates available to all your pipelines. Rather than manually downloading the certificates in each pipeline, you can provide them centrally at the runtime level.
+
+Under the `dockerDaemonScheduler` block, you can add two additional elements named `userVolumeMounts` and `userVolumes` (they follow the same syntax as normal k8s `volumes` and `volumeMounts`) and define your own global volumes.
+
+`runtime.yaml`
+{% highlight yaml %}
+{% raw %}
+...
+dockerDaemonScheduler:
+  userVolumeMounts:
+    my-cert:
+      name: cert
+      mountPath: /etc/ssl/cert
+      readOnly: true
+  userVolumes:
+    my-cert:
+      name: cert
+      secret:
+        secretName: tls-secret
+...
+{% endraw %}
+{% endhighlight %}
+
+### Debug Timeout Duration
+
+The default timeout for [debug mode]({{site.baseurl}}/docs/configure-ci-cd-pipeline/debugging-pipelines/) is 14 minutes, even if the user is actively working. To change the debugger duration, update the Runtime Spec of the runtime you want to change and add `DEBUGGER_TIMEOUT` as an environment variable.
The value is a string that defines the timeout in minutes. For example, passing '30' sets a 30-minute timeout.
+
+Under `.runtimeScheduler`, add an `envVars` section, then add `DEBUGGER_TIMEOUT` under `envVars` with the value you want.
+
+```yaml
+...
+runtimeScheduler:
+  envVars:
+    DEBUGGER_TIMEOUT: '30'
+...
+```
+
+### Volume Reusage Policy
+
+How volumes are reused depends on the volume selector configuration.
+The `reuseVolumeSelector` option is configurable in the runtime environment spec.
+
+The following options are available:
+
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName'` - the matched PV can be used by **any** pipeline in your account (this is the **default** volume selector).
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id'` - the matched PV can be used only by a **single pipeline**.
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id,io.codefresh.branch_name'` - the matched PV can be used only by a **single pipeline AND single branch**.
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id,trigger'` - the matched PV can be used only by a **single pipeline AND single trigger**.
+
+For the `codefresh-app,io.codefresh.accountName` approach:
+
+* Benefit: fewer PVs --> lower cost (since any PV can be used by any pipeline, the cluster needs to keep fewer PVs in its pool for Codefresh)
+* Downside: because a PV can be used by any pipeline, it can accumulate assets and data from different pipelines, which reduces the probability of cache hits
+
+For the `codefresh-app,io.codefresh.accountName,pipeline_id` approach:
+
+* Benefit: higher probability of cache hits (no "spam" from other pipelines)
+* Downside: more PVs to keep (higher cost)
+
+
+To change the volume selector, get the runtime YAML spec and, under the `dockerDaemonScheduler.pvcs.dind` block, specify `reuseVolumeSelector`:
+
+```yaml
+  pvcs:
+    dind:
+      volumeSize: 30Gi
+      reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id'
+```
+
+## Runtime Cleaners
+
+### Key points
+
+* Codefresh pipelines require disk space for:
+  * [Pipeline Shared Volume](https://codefresh.io/docs/docs/yaml-examples/examples/shared-volumes-between-builds/) (`/codefresh/volume`, implemented as a [docker volume](https://docs.docker.com/storage/volumes/))
+  * Docker containers - running and stopped
+  * Docker images and cached layers
+* To improve performance, the `volume-provisioner` can provision a previously used disk, together with its Docker images and pipeline volume from earlier builds. This speeds up builds by reusing the Docker cache and decreasing the I/O rate.
+* The least recently used Docker images and volumes should be cleaned to avoid out-of-space errors.
+* There are several places where pipeline volume cleanup is required, so there are several kinds of cleaner; a quick way to inspect the volumes involved is sketched right after this list.
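+Before tuning any cleaner, it can help to see what the provisioner has already created. A minimal inspection sketch (it assumes the default `codefresh` namespace used elsewhere on this page; PVC and PV names in your installation may differ, and the `grep` is only a convenience filter):
+
+```shell
+# List the PVCs backing dind pods in the runner namespace
+kubectl -n codefresh get pvc | grep dind
+
+# List the backing PVs with their capacity, status, and age
+kubectl get pv | grep dind
+```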
+
+### Cleaners
+
+* [IN-DIND cleaner](https://github.com/codefresh-io/dind/tree/master/cleaner) - deletes extra Docker containers, volumes, and images in the **dind pod**
+* [External volumes cleaner](https://github.com/codefresh-io/runtime-cluster-monitor/blob/master/charts/cf-monitoring/templates/dind-volume-cleanup.yaml) - deletes unused **external** PVs (EBS, GCE/Azure disks)
+* [Local volumes cleaner](https://github.com/codefresh-io/dind-volume-utils/blob/master/local-volumes/lv-cleaner.sh) - deletes **local** volumes when node disk space is close to the threshold
+
+***
+
+#### IN-DIND cleaner
+
+**Purpose:** Removes unneeded *Docker containers, images, and volumes* inside the Kubernetes volume mounted to the dind pod
+
+**Where it runs:** Inside each dind pod, as a script
+
+**Triggered by:** SIGTERM, and also during the run when disk usage (cleaner-agent) > 90% (configurable)
+
+**Configured by:** Environment variables that can be set in the Runtime Environment configuration
+
+**Configuration/Logic:** [README.md](https://github.com/codefresh-io/dind/tree/master/cleaner#readme)
+
+Override `dockerDaemonScheduler.envVars` on the Runtime Environment if necessary (the following are the **defaults**):
+
+```yaml
+dockerDaemonScheduler:
+  envVars:
+    CLEAN_PERIOD_SECONDS: '21600' # launch clean if last clean was more than CLEAN_PERIOD_SECONDS seconds ago
+    CLEAN_PERIOD_BUILDS: '5' # launch clean if more than CLEAN_PERIOD_BUILDS builds have run since the last clean
+    IMAGE_RETAIN_PERIOD: '14400' # do not delete docker images if they have events since current_timestamp - IMAGE_RETAIN_PERIOD
+    VOLUMES_RETAIN_PERIOD: '14400' # do not delete docker volumes if they have events since current_timestamp - VOLUMES_RETAIN_PERIOD
+    DISK_USAGE_THRESHOLD: '0.8' # launch clean based on current disk usage DISK_USAGE_THRESHOLD
+    INODES_USAGE_THRESHOLD: '0.8' # launch clean based on current inodes usage INODES_USAGE_THRESHOLD
+```
+
+***
+
+#### External volumes cleaner
+
+**Purpose:** Removes unused *Kubernetes volumes and related backend volumes*
+
+**Where it runs:** On the runtime cluster, as a CronJob
+(`kubectl get cronjobs -n codefresh -l app=dind-volume-cleanup`). Installed when the Runner uses non-local volumes (`Storage.Backend != local`)
+
+**Triggered by:** CronJob every 10min (configurable), part of [runtime-cluster-monitor](https://github.com/codefresh-io/runtime-cluster-monitor/blob/master/charts/cf-monitoring/templates/dind-volume-cleanup.yaml) and the runner deployment
+
+**Configuration:**
+
+Set the `codefresh.io/volume-retention` annotation on the Runtime Environment:
+
+```yaml
+dockerDaemonScheduler:
+  pvcs:
+    dind:
+      storageClassName: dind-ebs-volumes-runner-codefresh
+      reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id'
+      volumeSize: 32Gi
+      annotations:
+        codefresh.io/volume-retention: 7d
+```
+
+Override environment variables for the `dind-volume-cleanup` CronJob if necessary:
+
+* `RETENTION_DAYS` (defaults to 4)
+* `MOUNT_MIN` (defaults to 3)
+* `PROVISIONED_BY` (defaults to `codefresh.io/dind-volume-provisioner`)
+
+About the *optional* `-m` argument:
+
+* `dind-volume-cleanup` cleans volumes that were last used more than `RETENTION_DAYS` ago
+* `dind-volume-cleanup-m` cleans volumes that were used more than a day ago, but mounted less than `MOUNT_MIN` times
+
+***
+
+#### Local volumes cleaner
+
+**Purpose:** Deletes local volumes when node disk space is close to the threshold
+
+**Where it runs:** On each node of the runtime cluster, as the DaemonSet `dind-lv-monitor`.
Installed when the Runner uses local volumes (`Storage.Backend == local`)
+
+**Triggered by:** Starts cleaning when disk space usage or inode usage exceeds the thresholds (configurable)
+
+**Configuration:**
+
+Override environment variables for the `dind-lv-monitor` DaemonSet if necessary:
+
+* `VOLUME_PARENT_DIR` - default `/var/lib/codefresh/dind-volumes`
+* `KB_USAGE_THRESHOLD` - default 80 (percentage)
+* `INODE_USAGE_THRESHOLD` - default 80
+
+## ARM Builds
+
+With the hybrid Runner it's possible to run native ARM64v8 builds.
+
+>**Note:** Running both amd64 and arm64 images within the same pipeline is not possible. We do not support multi-architecture builds: one runtime configuration means one architecture. Because a pipeline can map to only one runtime, it can run either amd64 or arm64 builds, but not both within the same pipeline.
+
+The following scenario is an example of how to set up an ARM Runner on an existing EKS cluster:
+
+**Step 1 - Preparing nodes**
+
+Create a new ARM nodegroup:
+
+```shell
+eksctl utils update-coredns --cluster
+eksctl utils update-kube-proxy --cluster --approve
+eksctl utils update-aws-node --cluster --approve
+
+eksctl create nodegroup \
+--cluster \
+--region \
+--name \
+--node-type \
+--nodes <3>\
+--nodes-min <2>\
+--nodes-max <4>\
+--managed
+```
+
+Check node status:
+
+```shell
+kubectl get nodes -l kubernetes.io/arch=arm64
+```
+
+It's also recommended to label and taint the required ARM nodes:
+
+```shell
+kubectl taint nodes arch=aarch64:NoSchedule
+kubectl label nodes arch=arm
+```
+
+**Step 2 - Runner installation**
+
+Use [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml) to inject `tolerations`, `kube-node-selector`, `build-node-selector` into the Runtime Environment spec.
+
+`values-arm.yaml`
+
+```yaml
+...
+Namespace: codefresh
+
+### NodeSelector --kube-node-selector: controls runner and dind-volume-provisioner pods
+NodeSelector: arch=arm
+
+### Tolerations --tolerations: controls runner, dind-volume-provisioner and dind-lv-monitor
+Tolerations:
+- key: arch
+  operator: Equal
+  value: aarch64
+  effect: NoSchedule
+...
+########################################################
+### Codefresh Runtime                                ###
+###                                                  ###
+### configure engine and dind pods                   ###
+########################################################
+Runtime:
+### NodeSelector --build-node-selector: controls engine and dind pods
+  NodeSelector:
+    arch: arm
+### Tolerations for engine and dind pods
+  tolerations:
+  - key: arch
+    operator: Equal
+    value: aarch64
+    effect: NoSchedule
+...
+``` + +Install the Runner with: + +```shell +codefresh runner init --values values-arm.yaml --exec-demo-pipeline false --skip-cluster-integration true +``` + +**Step 3 - Post-installation fixes** + +Change `engine` image version in Runtime Environment specification: + +```shell +# get the latest engine ARM64 tag +curl -X GET "https://quay.io/api/v1/repository/codefresh/engine/tag/?limit=100" --silent | jq -r '.tags[].name' | grep "^1.*arm64$" +1.136.1-arm64 +``` + +```shell +# get runtime spec +codefresh get re $RUNTIME_NAME -o yaml > runtime.yaml +``` + +under `runtimeScheduler.image` change image tag: + +```yaml +runtimeScheduler: + image: 'quay.io/codefresh/engine:1.136.1-arm64' +``` + +```shell +# patch runtime spec +codefresh patch re -f runtime.yaml +``` + +For `local` storage patch `dind-lv-monitor-runner` DaemonSet and add `nodeSelector`: + +```shell +kubectl edit ds dind-lv-monitor-runner +``` + +```yaml + spec: + nodeSelector: + arch: arm +``` + +**Step 4 - Run Demo pipeline** + +Run a modified version of the *CF_Runner_Demo* pipeline: + +```yaml +version: '1.0' +stages: + - test +steps: + test: + stage: test + title: test + image: 'arm64v8/alpine' + commands: + - echo hello Codefresh Runner! +``` + +## Troubleshooting + +For troubleshooting refer to the [Knowledge Base](https://support.codefresh.io/hc/en-us/sections/4416999487762-Hybrid-Runner) + +## What to read next + +* [Codefresh installation options]({{site.baseurl}}/docs/installation/installation-options/) +* [Codefresh On-Premises]({{site.baseurl}}/docs/administration/codefresh-on-prem/) +* [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/) diff --git a/_docs/runtime/git-sources.md b/_docs/installation/gitops/git-sources.md similarity index 64% rename from _docs/runtime/git-sources.md rename to _docs/installation/gitops/git-sources.md index 2b95dc542..932a77078 100644 --- a/_docs/runtime/git-sources.md +++ b/_docs/installation/gitops/git-sources.md @@ -1,23 +1,24 @@ --- -title: "Add Git Sources to runtimes" -description: "" -group: runtime +title: "Add Git Sources to GitOps Runtimes" +description: "Manage Git Sources storing resources" +group: installation +sub_group: gitops toc: true --- -A Git Source is the equivalent of an Argo CD application that tracks a Git repository and syncs the desired state of the repo to the destination K8s cluster. In addition to application resources, the Git Source can store resources for Codefresh runtimes, and CI/CD entities such as delivery pipelines, Workflow Templates, workflows, and applications. +A Git Source is the equivalent of an Argo CD application that tracks a Git repository and syncs the desired state of the repo to the destination K8s cluster. In addition to application resources, the Git Source can store resources for GitOps Runtimes, and CI/CD entities such as delivery pipelines, Workflow Templates, workflows, and applications. -Provisioning a runtime automatically creates a Git Source that stores resources for the runtime and for the demo CI pipelines that are optionally installed with the runtime. Every Git Source is associated with a Codefresh runtime. A runtime can have one or more Git Sources. You can add Git Sources at any time, to the same or to different runtimes. +Provisioning a GitOps Runtime automatically creates a Git Source that stores resources for the Runtime and for the demo CI pipelines that are optionally installed with the Runtime. Every Git Source is associated with a GitOps Runtime. 
You can add more Git Sources at any time, to the same or to different Runtimes. -Once you create a Git Source for a runtime, you can store resources for CI/CD entities associated with that runtime. For example, when creating pipelines or applications, you can select the Git Source to which to store manifest definitions. +Once you create a Git Source for a GitOps Runtime, you can store resources for CI/CD entities associated with it. For example, when creating pipelines or applications, you can select the Git Source to which to store manifest definitions. -### View Git Sources and definitions +## View Git Sources and definitions Drill down on a runtime in List View to see its Git Sources. -1. In the Codefresh UI, go to the [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"} page. -1. From the **List View** (the default), select a runtime name, and then select the **Git Sources** tab. +1. In the Codefresh UI, go to the [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"} page. +1. From the **List View** (the default), select a Runtime name, and then select the **Git Sources** tab. {% include image.html @@ -26,20 +27,20 @@ Drill down on a runtime in List View to see its Git Sources. url="/images/runtime/git-source-list.png" alt="Git Sources in runtime" caption="Git Sources in runtime" - max-width="30%" + max-width="60%" %} {:start="3"} 1. To go to the repo tracked by the Git Source, in the Repo column, mouse over the repo name and select **Go to Git repo**. 1. To see the definitions for the Git Source, select the three dots at the end of the row. -### Create a Git Source -Create Git Sources for any provisioned runtime. The Git Sources are available to store resources for pipelines or applications when you create them. +## Create a Git Source +Create Git Sources for any provisioned Runtime. The Git Sources are available to store resources for pipelines or applications when you create them. >Make sure you are in the List View to create Git Sources. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. -1. In the List View, select the runtime for which to add a Git Source, and then select the **Git Sources** tab. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. +1. In the List View, select the Runtime for which to add a Git Source, and then select the **Git Sources** tab. 1. Select **Create Git Sources**, and in the Create Git Source panel, define the definitions for the Git Source: {% include @@ -49,14 +50,14 @@ Create Git Sources for any provisioned runtime. The Git Sources are available t url="/images/runtime/create-git-source.png" alt="Create Git Source" caption="Create Git Source" - max-width="30%" + max-width="60%" %} * **Git Source Name**: The name of the Git Source, which must be unique within the cluster. * **Source**: The Git repo with the desired state, tracked by the Git Source, and synced to the destination cluster. * **Repository**: Mandatory. The URL to the Git repo. * **Branch**: Optional. The specific branch within the repo to track. - * **Path**: Optional. The specific path within the repo, and branch, if one is specified, to track. + * **Path**: Optional. The specific path within the repo, and branch if one is specified, to track. * **Destination**: The destination cluster with the actual state to which to apply the changes from the **Source**. 
* **Namespace**: The namespace in the destination cluster to which to sync the changes. @@ -69,12 +70,12 @@ Create Git Sources for any provisioned runtime. The Git Sources are available t {:start="4"} 1. Select **+ Create Git Source**. -### Edit Git Source definitions +## Edit Git Source definitions Edit an existing Git Source by changing the source and destination definitions. > You cannot change the name of the Git Source. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. -1. From the **List View** (the default), select the runtime with the Git Source, and then select the **Git Sources** tab. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. +1. From the **List View** (the default), select the Runtime with the Git Source, and then select the **Git Sources** tab. 1. In the row with the Git Source to edit, select the three dots, and then select **Edit** in the panel that appears. {% include @@ -89,13 +90,13 @@ Edit an existing Git Source by changing the source and destination definitions. {:start="4"} 1. Change the **Source** and **Destination** definitions for the Git Source, and select **Save**. -### View/download logs for a Git Source -View online logs for any Git Source associated with a runtime, and if needed, download the log file for offline viewing and analysis. -Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events from the application launch to the date and time of download. +## View/download logs for a Git Source +View online logs for any Git Source associated with a Runtime, and if needed, download the log file for offline viewing and analysis. +Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events, from the application launch to the date and time of download. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. -1. From the **List View** (the default), select the runtime with the Git Source, and then select the **Git Sources** tab. -1. In the row with the Git Source foe which to view/download logs, select the three dots, and then select **View Logs**. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. +1. From the **List View** (the default), select the Runtime with the Git Source, and then select the **Git Sources** tab. +1. In the row with the Git Source for which to view/download logs, select the three dots, and then select **View Logs**. {% include image.html @@ -104,7 +105,7 @@ Online logs show up to 1000 of the most recent events (lines), updated in real t url="/images/runtime/git-source-view-logs.png" alt="Edit Git Source" caption="Edit Git Source" - max-width="30%" + max-width="60%" %} {:start="4"} @@ -126,7 +127,8 @@ Online logs show up to 1000 of the most recent events (lines), updated in real t 1. To download the log, click **Download**. The file is downloaded with `.log` extension. 
-### What to read next -[Manage runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Recover runtimes]({{site.baseurl}}/docs/runtime/runtime-recovery/) +## Related articles +[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/monitor-manage-runtimes/) +[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) + diff --git a/_docs/runtime/hosted-runtime.md b/_docs/installation/gitops/hosted-runtime.md similarity index 61% rename from _docs/runtime/hosted-runtime.md rename to _docs/installation/gitops/hosted-runtime.md index 0a08ba3bc..11bd496ea 100644 --- a/_docs/runtime/hosted-runtime.md +++ b/_docs/installation/gitops/hosted-runtime.md @@ -1,18 +1,29 @@ --- -title: "Set up a hosted runtime environment" -description: "" -group: runtime +title: "Hosted GitOps Runtime setup" +description: "Provision Hosted GitOps environment" +group: installation +sub_group: gitops toc: true --- -If you have Codefresh's Hosted GitOps, set up your hosted environment, and you are all ready to leverage extensive CD Ops capabilities. -Read about [Hosted GitOps]({{site.baseurl}}/docs/incubation/intro-hosted-runtime/). +Set up your environment with the Hosted GitOps Runtime to leverage extensive CD capabilities. + -### Where to start with Hosted GitOps -If you have not provisioned a hosted runtime, Codefresh presents you with the setup instructions in the **Home** dashboard. +## System requirements for Hosted GitOps Runtimes +{: .table .table-bordered .table-hover} +| Item | Requirement | +| -------------- | -------------- | +|Kubernetes cluster | Server version 1.18 and higher to which to deploy applications| +|Git provider | {::nomarkdown}
        • GitHub
        • Bitbucket Cloud
        {:/}| + + +## Where to start with Hosted GitOps Runtimes +If you have not provisioned a Hosted GitOps Runtime, Codefresh presents you with the setup instructions in the **Home** dashboard. + + * In the Codefresh UI, go to Codefresh [Home](https://g.codefresh.io/2.0/?time=LAST_7_DAYS){:target="\_blank"}. Codefresh guides you through the three-step setup, as described below. @@ -27,18 +38,18 @@ caption="Hosted GitOps setup" max-width="80%" %} - >You can provision a single hosted runtime for your Codefresh account. + >You can provision a single Hosted GitOps Runtime per Codefresh account. -### 1. Provision hosted runtime -Start installing the hosted runtime with a single-click. Codefresh completes the installation without any further intervention on your part. -The hosted runtime is provisioned on the Codefresh cluster, and completely managed by Codefresh with automatic version and security upgrades. +## Step 1: Install Hosted GitOps Runtime +Start installing the Hosted GitOps Runtime with a single-click. Codefresh completes the installation without any further intervention on your part. +The Hosted GitOps Runtime is provisioned on the Codefresh cluster, and completely managed by Codefresh with automatic version and security upgrades. 1. Do one of the following: - * To set up Hosted GitOps later, click **Install later**, and continue from step _2_. + * To set up Hosted GitOps Runtime later, click **Install later**, and continue from step _2_. * To start setup, click **Install**, and continue from step _3_. {% include @@ -46,16 +57,16 @@ image.html lightbox="true" file="/images/runtime/hosted-installing.png" url="/images/runtime/hosted-installing.png" -alt="Step 1: Installing hosted runtime" -caption="Step 1: Installing hosted runtime" +alt="Step 1: Installing Hosted GitOps Runtime" +caption="Step 1: Installing Hosted GitOps Runtime" max-width="80%" %} {:start="2"} 1. Do the following: - * In the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and click **+ Add Runtimes**. - * Select **Hosted Runtime** and click **Add**. - >An account can be provisioned with a single hosted runtime. If you have already provisioned a hosted runtime for your account, the Hosted Runtime option is disabled. + * In the Codefresh UI, go to [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and click **+ Add Runtimes**. + * Select **Hosted GitOps Runtime** and click **Add**. + >An account can be provisioned with a single Hosted GitOps Runtime. If you have already provisioned a Hosted GitOps Runtime for your account, the Hosted GitOps Runtime option is disabled. * Continue from _step 3_. {% include @@ -63,14 +74,14 @@ image.html lightbox="true" file="/images/runtime/hosted-install-later.png" url="/images/runtime/hosted-install-later.png" -alt="Install hosted runtime" -caption="Install hosted runtime" +alt="Install Hosted GitOps Runtime" +caption="Install Hosted GitOps Runtime" max-width="40%" %} {:start="3"} -1. When complete, to view the components for the hosted runtime, click **View Runtime**. +1. When complete, to view the components for the Hosted GitOps Runtime, click **View Runtime**. You are directed to the Runtime Components tab. 
{% include @@ -78,14 +89,14 @@ image.html lightbox="true" file="/images/runtime/hosted-runtime-components.png" url="/images/runtime/hosted-runtime-components.png" -alt="Runtime components for hosted runtime" -caption="Runtime components for hosted runtime" +alt="Runtime components for Hosted GitOps Runtime" +caption="Runtime components for Hosted GitOps Runtime" max-width="70%" %} > The Git Sources and the Managed Clusters are empty as they will be set up in the next steps. -If you navigate to **Runtimes > List View**, you can identify the hosted runtime through the Type column (Hosted ), the Cluster/Namespace column (Codefresh), and the Module column (CD Ops). +If you navigate to **Runtimes > List View**, you can identify the Hosted GitOps Runtime through the Type column (Hosted), the Cluster/Namespace column (Codefresh), and the Module column (CD Ops). {% include image.html @@ -97,8 +108,8 @@ caption="Hosted runtimes in List view" max-width="70%" %} -#### Troubleshoot failed hosted runtime installation -Your hosted runtime may fail to install with an error as in the image below. We are closely moinitoring the hosted runtime installation process and activley working to prevent and iron out all installation errors. Follow the instructions to uninstall and reinstall the hosted runtime. +### Troubleshoot failed Hosted GitOps Runtime installation +Your Hosted GitOps Runtime may fail to install with an error as in the image below. We are closely monitoring the Hosted GitOps Runtime installation process and actively working to prevent and iron out all installation errors. Follow the instructions to uninstall and reinstall the Hosted GitOps Runtime. {% include image.html @@ -117,16 +128,16 @@ max-width="70%" To compare with the latest version from Codefresh, [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"}. * [Download the CLI]({{site.baseurl}}/docs/clients/csdp-cli/). -1. Uninstall the failed hosted runtime: +1. Uninstall the failed Hosted GitOps Runtime: `cf runtime uninstall codefresh-hosted --force` where: - `hosted-codefresh` is the name of your hosted runtime, automatically assigned by Codefresh. + `hosted-codefresh` is the name of your Hosted GitOps Runtime, automatically assigned by Codefresh. 1. In the Codefresh UI, return to Codefresh [Home](https://g.codefresh.io/2.0/?time=LAST_7_DAYS){:target="\_blank"}. -1. Refresh the page and start with _1. Provision hosted runtime_ above. +1. Refresh the page and start with [Step 1: Install Hosted GitOps Runtime](#step-1-install-hosted-gitops-runtime). -### 2. Connect Git provider -Connect your hosted runtime to a Git provider for Codefresh to create the required Git repos. First authorize access to your Git provider through an OAuth token, and then select the Git organizations or accounts in which to create the required Git repos. +## Step 2: Connect Git provider +Connect your Hosted GitOps Runtime to a Git provider for Codefresh to create the required Git repos. First authorize access to your Git provider through an OAuth token, and then select the Git organizations or accounts in which to create the required Git repos. >Only authorized organizations are displayed in the list. To authorize organizations for the Codefresh application in GitHub, see [Authorize organizations/projects]({{site.baseurl}}/docs/administration/hosted-authorize-orgs/).
@@ -145,12 +156,12 @@ max-width="80%" Once you authorize access, Codefresh creates two Git repositories, one to store the runtime configuration settings, and the other to store the runtime's application settings: * Shared runtime configuration repo - The shared runtime configuration repo is a centralized Git repository that stores configuration settings for the hosted runtime. Additional runtimes provisioned for the account can point to this repo to retrieve and reuse the configuration. + The shared runtime configuration repo is a centralized Git repository that stores configuration settings for the Hosted GitOps Runtime. Additional runtimes provisioned for the account can point to this repo to retrieve and reuse the configuration. Read about [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/). * Git Source application repo - Codefresh creates a Git Source application repo for every hosted runtime. + Codefresh creates a Git Source application repo for every Hosted GitOps Runtime. Read about [Git sources]({{site.baseurl}}/docs/runtime/git-sources/). @@ -224,16 +235,16 @@ image.html lightbox="true" file="/images/runtime/hosted-git-source-in-ui.png" url="/images/runtime/hosted-git-source-in-ui.png" -alt="Git Source tab for hosted runtime" -caption="Git Source tab for hosted runtime" +alt="Git Source tab for Hosted GitOps Runtime" +caption="Git Source tab for Hosted GitOps Runtime" max-width="80%" %} -### 3. Connect a Kubernetes cluster +## 3. Connect a Kubernetes cluster -Connect a destination cluster to the hosted runtime and register it as a managed cluster. Deploy applications and configuration to the cluster. -For managed cluster information and installing Argo Rollouts, see [Add and manage external clusters]({{site.baseurl}}/docs/runtime/managed-cluster/). +Connect a destination cluster to the Hosted GitOps Runtime and register it as a managed cluster. Deploy applications and configuration to the cluster. +For managed cluster information and installing Argo Rollouts, see [Add and manage external clusters]({{site.baseurl}}/docs/installation/gitops/managed-cluster/). {% include @@ -241,8 +252,8 @@ image.html lightbox="true" file="/images/runtime/hosted-connect-cluster-step.png" url="/images/runtime/hosted-connect-cluster-step.png" -alt="Step 3: Connect a K8s cluster for hosted runtime" -caption="Step 3: Connect a K8s cluster for hosted runtime" +alt="Step 3: Connect a K8s cluster for Hosted GitOps Runtime" +caption="Step 3: Connect a K8s cluster for Hosted GitOps Runtime" max-width="70%" %} @@ -273,8 +284,8 @@ max-width="70%" lightbox="true" file="/images/runtime/hosted-new-cluster-topology.png" url="/images/runtime/hosted-new-cluster-topology.png" - alt="New K8s cluster in hosted runtime" - caption="New K8s cluster in hosted runtime" + alt="New K8s cluster in Hosted GitOps Runtime" + caption="New K8s cluster in Hosted GitOps Runtime" max-width="80%" %} @@ -282,12 +293,9 @@ max-width="70%" 1. Configure access to the IP addresses required. See [Codefresh IP addresses]({{site.baseurl}}/docs/administration/platform-ip-addresses/). If you could not connect a cluster, you may not have the latest version of the CLI: -* If you have installed the Codefresh CLI already, make sure you have the latest version: - `cf version` - To compare with the latest version from Codefresh, [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"}. -* [Download the CLI]({{site.baseurl}}/docs/clients/csdp-cli/). 
+[Upgrade the GitOps CLI]({{site.baseurl}}/docs/clients/upgrade-gitops-cli/). -You have completed setting up your hosted runtime. You are ready to create applications, and connect third-party CI tools for image enrichment. +You have completed setting up your Hosted GitOps Runtime. You are ready to create applications, and connect third-party CI tools for image enrichment. ### (Optional) Create application Optional. Create an application in Codefresh, deploy it to the cluster, and track deployment and performance in the Applications dashboard. {:start="2"} 2. In the Codefresh UI, view your application in the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard){:target="\_blank"}. -### (Optional) Connect CI +## (Optional) Connect CI Optional. Integrate Codefresh with the third-party tools you use for CI to enrich image information in deployments. [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) ### Related articles -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard/) -[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) +[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/monitor-manage-runtimes/) +[Add Git Sources to runtimes]({{site.baseurl}}/docs/installation/gitops/git-sources/) +[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) +[Home dashboard]({{site.baseurl}}/docs/dashboards/home-dashboard/) +[DORA metrics]({{site.baseurl}}/docs/dashboards/dora-metrics/) diff --git a/_docs/installation/gitops/hybrid-gitops.md b/_docs/installation/gitops/hybrid-gitops.md new file mode 100644 index 000000000..a2325debb --- /dev/null +++ b/_docs/installation/gitops/hybrid-gitops.md @@ -0,0 +1,1290 @@ +--- +title: "Hybrid GitOps Runtime installation" +description: "Provision Hybrid GitOps Runtimes" +group: installation +sub_group: gitops +toc: true +--- + +Provision one or more Hybrid GitOps Runtimes in your Codefresh account. +Start by reviewing [system requirements](#minimum-system-requirements) for Hybrid GitOps. +If you are installing with ingress-controllers, you must configure them as required _before_ starting the installation. + +> To provision a Hosted GitOps Runtime, see [Provision a hosted runtime]({{site.baseurl}}/docs/installation/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/installation/hosted-runtime/). + +**Git providers and Hybrid Runtimes** +Your Codefresh account is always linked to a specific Git provider. This is the Git provider you select on installing the first GitOps Runtime, either Hybrid or Hosted, in your Codefresh account. All the Hybrid GitOps Runtimes you install in the same account use the same Git provider. +If Bitbucket Server is your Git provider, you must also select the specific server instance to associate with the runtime. + +>To change the Git provider for your Codefresh account after installation, contact Codefresh support. + + +**Codefresh and Argo CD** + The Hybrid GitOps Runtime comprises Argo CD components and Codefresh-specific components. + +Codefresh users rely on our platform to deliver software reliably and predictably, without interruption.
+To maintain that high standard, we add several weeks of testing and bug fixes to new versions of Argo before making them available within Codefresh. +Typically, new versions of Argo are available within 30 days of release in Argo. + + +There are two parts to installing a Hybrid GitOps Runtime: + +1. [Installing the GitOps CLI](#gitops-cli-installation) +2. [Installing the Hybrid GitOps Runtime](#install-hybrid-gitops-runtime), either through the CLI wizard or via silent installation through the installation flags. + The Hybrid GitOps Runtime is installed in a specific namespace on your cluster. You can install more Hybrid GitOps Runtimes on different clusters in your deployment. + Every Hybrid GitOps Runtime installation makes commits to three Git repos: + * Runtime install repo: The installation repo that manages the Hybrid Runtime itself with Argo CD. If the repo URL does not exist, it is automatically created during installation. + * Git Source repo: Created automatically during Runtime installation. The repo where you store manifests for pipelines and applications. See [Git Sources]({{site.baseurl}}/docs/installation/gitops/git-sources). + * Shared configuration repo: Created for the first GitOps Runtime installed in your account. The repo stores configuration manifests for account-level resources and is shared with other GitOps Runtimes in the same account. See [Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration). + + + +{::nomarkdown} +
        +{:/} + +## Minimum system requirements + +{: .table .table-bordered .table-hover} +| Item | Requirement | +| -------------- | -------------- | +|Kubernetes cluster | Server version 1.18 and higher, without Argo Project components. {::nomarkdown}
        Tip: To check the server version, run:
        kubectl version --short.{:/}| +| Ingress controller| Configured on Kubernetes cluster and exposed from the cluster. {::nomarkdown}
        Supported and tested ingress controllers include:
        • Ambassador
        • {:/}(see [Ambassador ingress configuration](#ambassador-ingress-configuration)){::nomarkdown}
        • AWS ALB (Application Load Balancer)
        • {:/} (see [AWS ALB ingress configuration](#aws-alb-ingress-configuration)){::nomarkdown}
        • Istio
        • {:/} (see [Istio ingress configuration](#istio-ingress-configuration)){::nomarkdown}
        • NGINX Enterprise (nginx.org/ingress-controller)
        • {:/} (see [NGINX Enterprise ingress configuration](#nginx-enterprise-ingress-configuration)){::nomarkdown}
        • NGINX Community (k8s.io/ingress-nginx)
        • {:/} (see [NGINX Community ingress configuration](#nginx-community-version-ingress-configuration)){::nomarkdown}
• Traefik
        • {:/}(see [Traefik ingress configuration](#traefik-ingress-configuration))| +|Node requirements| {::nomarkdown}
          • Memory: 5000 MB
          • CPU: 2
          {:/}| +|Cluster permissions | Cluster admin permissions | +|Git providers |{::nomarkdown}
          • GitHub
          • GitHub Enterprise
          • GitLab Cloud
          • GitLab Server
          • Bitbucket Cloud
          • Bitbucket Server
          {:/}| +|Git access tokens | {::nomarkdown}Git runtime token:
          • Valid expiration date
          • Scopes:
            • GitHub and GitHub Enterprise: repo, admin-repo.hook
            • GitLab Cloud and GitLab Server: api, read_repository
            • Bitbucket Cloud and Server: Permissions: Read, Workspace membership: Read, Webhooks: Read and write, Repositories: Write, Admin
            {:/}| + +## Ingress controller configuration + +### Ambassador ingress configuration +For detailed configuration information, see the [Ambassador ingress controller documentation](https://www.getambassador.io/docs/edge-stack/latest/topics/running/ingress-controller){:target="\_blank"}. + +This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. +* Valid external IP address +* Valid TLS certificate +* TCP support + +{::nomarkdown} +
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + {::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +

+{:/}
+
+### AWS ALB ingress configuration
+
+For detailed configuration information, see the [ALB AWS ingress controller documentation](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4){:target="\_blank"}.
+
+This table lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address | _Before_ installing hybrid runtime |
+|Valid TLS certificate | |
+|TCP support| |
+|Controller configuration | |
+|Alias DNS record in route53 to load balancer | _After_ installing hybrid runtime |
+|(Optional) Git integration registration | |
+
+{::nomarkdown}
+
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
            +{:/} + +#### Controller configuration +In the ingress resource file, verify that `spec.controller` is configured as `ingress.k8s.aws/alb`. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: alb +spec: + controller: ingress.k8s.aws/alb +``` + +{::nomarkdown} +
            +{:/} + +#### Create an alias to load balancer in route53 + +> The alias must be configured _after_ installing the hybrid runtime. + +1. Make sure a DNS record is available in the correct hosted zone. +1. _After_ hybrid runtime installation, in Amazon Route 53, create an alias to route traffic to the load balancer that is automatically created during the installation: + * **Record name**: Enter the same record name used in the installation. + * Toggle **Alias** to **ON**. + * From the **Route traffic to** list, select **Alias to Application and Classic Load Balancer**. + * From the list of Regions, select the region. For example, **US East**. + * From the list of load balancers, select the load balancer that was created during installation. + +For more information, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html){:target="\_blank"}. + +{% include image.html + lightbox="true" + file="/images/runtime/post-install-alb-ingress.png" + url="/images/runtime/post-install-alb-ingress.png" + alt="Route 53 record settings for AWS ALB" + caption="Route 53 record settings for AWS ALB" + max-width="60%" +%} + +{::nomarkdown} +
            +{:/} + +#### (Optional) Git integration registration +If the installation failed, as can happen if the DNS record was not created within the timeframe, manually create and register Git integrations using these commands: + `cf integration git add default --runtime --api-url ` + `cf integration git register default --runtime --token ` + +{::nomarkdown} +

+{:/}
+
+### Istio ingress configuration
+For detailed configuration information, see [Istio ingress controller documentation](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress){:target="\_blank"}.
+
+The table below lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address |_Before_ installing hybrid runtime |
+|Valid TLS certificate| |
+|TCP support | |
+|Cluster routing service | _After_ installing hybrid runtime |
+
+{::nomarkdown}
+
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
            +{:/} + + + +#### Cluster routing service +> The cluster routing service must be configured _after_ installing the hybrid runtime. + +Based on the runtime version, you need to configure a single or multiple `VirtualService` resources for the `app-proxy`, `webhook`, and `workflow` services. + +##### Runtime version 0.0.543 or higher +Configure a single `VirtualService` resource to route traffic to the `app-proxy`, `webhook`, and `workflow` services, as in the example below. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + namespace: pov-codefresh-istio-runtime # replace with your runtime name + name: internal-router +spec: + hosts: + - pov-codefresh-istio-runtime.sales-dev.codefresh.io # replace with your host name + gateways: + - istio-system/internal-router # replace with your gateway name + http: + - match: + - uri: + prefix: /webhooks + route: + - destination: + host: internal-router + port: + number: 80 + - match: + - uri: + prefix: /app-proxy + route: + - destination: + host: internal-router + port: + number: 80 + - match: + - uri: + prefix: /workflows + route: + - destination: + host: internal-router + port: + number: 80 +``` + +##### Runtime version 0.0.542 or lower + +Configure two different `VirtualService` resources, one to route traffic to the `app-proxy`, and the second to route traffic to the `webhook` services, as in the examples below. + +{::nomarkdown} +
            +{:/} + +**`VirtualService` example for `app-proxy`:** + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + namespace: test-runtime3 # replace with your runtime name + name: cap-app-proxy +spec: + hosts: + - my.support.cf-cd.com # replace with your host name + gateways: + - my-gateway # replace with your host name + http: + - match: + - uri: + prefix: /app-proxy + route: + - destination: + host: cap-app-proxy + port: + number: 3017 +``` + +**`VirtualService` example for `webhook`:** + +> Configure a `uri.prefix` and `destination.host` for each event-source if you have more than one. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + namespace: test-runtime3 # replace with your runtime name + name: csdp-default-git-source +spec: + hosts: + - my.support.cf-cd.com # replace with your host name + gateways: + - my-gateway # replace with your gateway name + http: + - match: + - uri: + prefix: /webhooks/test-runtime3/push-github # replace `test-runtime3` with your runtime name, and `push-github` with the name of your event source + route: + - destination: + host: push-github-eventsource-svc # replace `push-github' with the name of your event source + port: + number: 80 + - match: + - uri: + prefix: /webhooks/test-runtime3/cypress-docker-images-push # replace `test-runtime3` with your runtime name, and `cypress-docker-images-push` with the name of your event source + route: + - destination: + host: cypress-docker-images-push-eventsource-svc # replace `cypress-docker-images-push` with the name of your event source + port: + number: 80 +``` + +{::nomarkdown} +

+{:/}
+
+### NGINX Enterprise ingress configuration
+
+For detailed configuration information, see [NGINX ingress controller documentation](https://docs.nginx.com/nginx-ingress-controller){:target="\_blank"}.
+
+The table below lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Verify valid external IP address |_Before_ installing hybrid runtime |
+|Valid TLS certificate | |
+|TCP support| |
+|NGINX Ingress: Enable report status to cluster | |
+|NGINX Ingress Operator: Enable report status to cluster| |
+|Patch certificate secret |_After_ installing hybrid runtime |
+
+{::nomarkdown}
+
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
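+{:/}
+
+For example, if you already have certificate and key files for the ingress FQDN, you can store them as a TLS secret for the ingress controller to use. This is only an illustrative sketch; the secret name, file paths, and namespace are placeholders, and your certificate management process may differ (for example, cert-manager):
+
+```shell
+# Create a TLS secret from existing certificate files (names and paths are placeholders)
+kubectl create secret tls my-runtime-tls \
+  --cert=/path/to/fullchain.pem \
+  --key=/path/to/private-key.pem \
+  -n <ingress-controller-namespace>
+```
+
+{::nomarkdown}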
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
+{:/}
+
+#### NGINX Ingress: Enable report status to cluster
+
+If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing”, resulting in a timeout error during installation.
+
+* Pass the `--report-ingress-status` argument to the ingress controller `deployment`.
+
+```yaml
+spec:
+  containers:
+    - args:
+      - --report-ingress-status
+```
+
+{::nomarkdown}
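+{:/}
+
+If you prefer not to edit the manifest directly, the same argument can be appended with a JSON patch. This is a sketch only; the deployment name and namespace are placeholders that depend on how NGINX Ingress was installed:
+
+```shell
+# Append --report-ingress-status to the existing container args (names are placeholders)
+kubectl patch deployment <nginx-ingress-deployment> -n <nginx-ingress-namespace> \
+  --type=json \
+  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--report-ingress-status"}]'
+```
+
+{::nomarkdown}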
            +{:/} + +#### NGINX Ingress Operator: Enable report status to cluster + +If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing” resulting in a timeout error during installation. + +1. Add this to the `Nginxingresscontrollers` resource file: + + ```yaml + ... + spec: + reportIngressStatus: + enable: true + ... + ``` + +1. Make sure you have a certificate secret in the same namespace as the runtime. Copy an existing secret if you don't have one. +You will need to add this to the `ingress-master` when you have completed runtime installation. + +{::nomarkdown} +
+{:/}
+
+#### Patch certificate secret
+> The certificate secret must be configured _after_ installing the hybrid runtime.
+
+Patch the certificate secret in `spec.tls` of the `ingress-master` resource.
+The secret must be in the same namespace as the runtime.
+
+1. Go to the runtime namespace with the NGINX ingress controller.
+1. In `ingress-master`, add to `spec.tls`:
+
+   ```yaml
+   tls:
+    - hosts:
+      - <ingress-host>
+      secretName: <certificate-secret-name>
+   ```
+
+{::nomarkdown}
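+{:/}
+
+As an alternative to editing the resource manually, the same change can be applied with a merge patch. This is a sketch only; the namespace, host, and secret name are illustrative placeholders:
+
+```shell
+# Patch spec.tls on the ingress-master resource (host and secret name are placeholders)
+kubectl patch ingress ingress-master -n <runtime-namespace> \
+  --type=merge \
+  -p '{"spec":{"tls":[{"hosts":["my-runtime.example.com"],"secretName":"my-runtime-tls"}]}}'
+```
+
+{::nomarkdown}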

            +{:/} + +### NGINX Community version ingress configuration + +Codefresh has been tested with and supports implementations of the major providers. For your convenience, we have provided configuration instructions, both for supported and untested providers in [Provider-specific configuration](#provider-specific-configuration). + + +This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. +* Verify valid external IP address +* Valid TLS certificate +* TCP support + +{::nomarkdown} +
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services, and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +Here's an example of TCP configuration for NGINX Community on AWS. +Verify that the `ingress-nginx-controller` service manifest has either of the following annotations: + +`service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"` +OR +`service.beta.kubernetes.io/aws-load-balancer-type: nlb` + +{::nomarkdown} +
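+{:/}
+
+For reference, here is a trimmed sketch of what the `ingress-nginx-controller` service might look like with the first annotation; the port list is abbreviated and the values depend on your installation:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+  annotations:
+    # Either this annotation or aws-load-balancer-type: nlb satisfies the TCP requirement
+    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
+spec:
+  type: LoadBalancer
+  ports:
+    - name: https
+      port: 443
+      targetPort: https
+      protocol: TCP
+```
+
+{::nomarkdown}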
            +{:/} + +#### Provider-specific configuration + +> The instructions are valid for `k8s.io/ingress-nginx`, the community version of NGINX. + +
+**AWS**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/aws/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for AWS.
            +
+**Azure (AKS)**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for AKS.
            + +
+**Bare Metal Clusters**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+Bare-metal clusters often have additional considerations. See Bare-metal ingress-nginx considerations.
            + +
+**Digital Ocean**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/do/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for Digital Ocean.
            + +
+**Docker Desktop**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for Docker Desktop.
+> Note: By default, Docker Desktop services provision with `localhost` as their external address. Triggers in delivery pipelines cannot reach this instance unless they originate from the same machine where Docker Desktop is being used.
            + +
+**Exoscale**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/exoscale/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for Exoscale.
            + + +
+**Google (GKE)**
+
+**Add firewall rules**
+
+GKE by default limits outbound requests from nodes. For the runtime to communicate with the control plane in Codefresh, add a firewall-specific rule.
+
+1. Find your cluster's network:
+   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(network)"`
+1. Get the cluster IPv4 CIDR:
+   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(clusterIpv4Cidr)"`
+1. Replace `[CLUSTER_NAME]`, `[NETWORK]`, and `[CLUSTER_IPV4_CIDR]` with the relevant values, and create the rule:
+   `gcloud compute firewall-rules create "[CLUSTER_NAME]-to-all-vms-on-network" --network="[NETWORK]" --source-ranges="[CLUSTER_IPV4_CIDR]" --allow=tcp,udp,icmp,esp,ah,sctp`
+
+**Use ingress-nginx**
+
+1. Create a `cluster-admin` role binding:
+   `kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user $(gcloud config get-value account)`
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+We recommend reviewing the provider-specific documentation for GKE.
            + + +
+**MicroK8s**
+
+1. Install using the MicroK8s addon system:
+   `microk8s enable ingress`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+MicroK8s has not been tested with Codefresh, and may require additional configuration. For details, see the Ingress addon documentation.
            + + +
+**MiniKube**
+
+1. Install using the MiniKube addon system:
+   `minikube addons enable ingress`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+MiniKube has not been tested with Codefresh, and may require additional configuration. For details, see the Ingress addon documentation.
            + + + +
+**Oracle Cloud Infrastructure**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for Oracle Cloud.
            + +
+**Scaleway**
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/scw/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see the ingress-nginx documentation for Scaleway.
            + +{::nomarkdown} +

+{:/}
+
+### Traefik ingress configuration
+For detailed configuration information, see the [Traefik ingress controller documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress){:target="\_blank"}.
+
+The table below lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address | _Before_ installing hybrid runtime |
+|Valid TLS certificate | |
+|TCP support | |
+|Enable report status to cluster| |
+
+{::nomarkdown}
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
+{:/}
+
+#### Enable report status to cluster
+By default, the Traefik ingress controller is not configured to report its status to the cluster. If not configured, Argo’s health check reports the health status as “progressing”, resulting in a timeout error during installation.
+
+To enable reporting its status, add `publishedService` to `providers.kubernetesIngress.ingressEndpoint`.
+
+The value must be in the format `"<namespace>/<service-name>"`, where:
+  `<namespace>/<service-name>` is the namespace and name of the Traefik service from which to copy the status
+
+```yaml
+...
+providers:
+  kubernetesIngress:
+    ingressEndpoint:
+      publishedService: "<namespace>/<service-name>" # Example: "codefresh/traefik-default"
+...
+```
+
+{::nomarkdown}
            +{:/} + +## GitOps CLI installation + +### GitOps CLI installation modes +The table lists the modes available to install the Codefresh CLI. + +{: .table .table-bordered .table-hover} +| Install mode | OS | Commands | +| -------------- | ----------| ----------| +| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| +| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | +| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | +| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| +| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`| + +### Install the GitOps CLI +Install the GitOps CLI using the option that best suits you: `curl`, `brew`, or standard download. +If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for the installation. + +1. Do one of the following: + * For first-time installation, go to the Welcome page, select **+ Install Runtime**. + * If you have provisioned a GitOps Runtime, in the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtime**. +1. Install the Codefresh CLI: + * Select one of the installation modes. + * Generate the API key. + * Create the authentication context: + `cf config create-context codefresh --api-key ` + + + {% include + image.html + lightbox="true" + file="/images/getting-started/quick-start/quick-start-download-cli.png" + url="/images/getting-started/quick-start/quick-start-download-cli.png" + alt="Download CLI to install runtime" + caption="Download CLI to install runtime" + max-width="30%" + %} + + +{::nomarkdown} +
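+{:/}
+
+Putting the steps together, a typical first-time flow might look like the sketch below. It assumes Linux x64 and uses `<API_KEY>` as a placeholder for the key you generate in the Codefresh UI:
+
+```shell
+# Download the CLI, move it into the PATH, and verify the version
+curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx
+mv ./cf-linux-amd64 /usr/local/bin/cf
+cf version
+
+# Create the authentication context with the API key generated in the UI
+cf config create-context codefresh --api-key <API_KEY>
+```
+
+{::nomarkdown}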

+{:/}
+
+## Install Hybrid GitOps Runtime
+
+### Before you begin
+* Make sure you meet the [minimum requirements]({{site.baseurl}}/docs/runtime/requirements/#minimum-requirements) for installation
+* Make sure you have a [Runtime token with the required scopes from your Git provider]({{site.baseurl}}/docs/reference/git-tokens)
+* [Download or upgrade to the latest version of the CLI]({{site.baseurl}}/docs/installation/clients/upgrade-gitops-cli)
+* Review [Hybrid GitOps Runtime installation flags](#hybrid-gitops-runtime-installation-flags)
+* For ingress-based runtimes, make sure your ingress controller is configured correctly:
+  * [Ambassador ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#ambassador-ingress-configuration)
+  * [AWS ALB ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#alb-aws-ingress-configuration)
+  * [Istio ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#istio-ingress-configuration)
+  * [NGINX Enterprise ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-enterprise-ingress-configuration)
+  * [NGINX Community ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-community-version-ingress-configuration)
+  * [Traefik ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#traefik-ingress-configuration)
+
+
+{::nomarkdown}
+{:/}
+
+### How to
+
+1. Do one of the following:
+  * If this is your first Hybrid GitOps installation, in the Welcome page, select **+ Install Runtime**.
+  * If you have already provisioned a Hybrid GitOps Runtime, to provision additional runtimes, in the Codefresh UI:
+    On the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}.
+1. Click **+ Add Runtimes**, and then select **Hybrid Runtimes**.
+1. Do one of the following:
+  * CLI wizard: Run `cf runtime install`, and follow the prompts to enter the required values.
+  * Silent install: Pass the required flags in the install command, as in the example after this list:
+    `cf runtime install --repo <repo_url> --git-token <git_token> --silent`
+    For the list of flags, see [Hybrid GitOps Runtime installation flags](#hybrid-gitops-runtime-installation-flags).
+1. If relevant, complete the configuration for these ingress controllers:
+  * [ALB AWS: Alias DNS record in route53 to load balancer]({{site.baseurl}}/docs/runtime/requirements/#alias-dns-record-in-route53-to-load-balancer)
+  * [Istio: Configure cluster routing service]({{site.baseurl}}/docs/runtime/requirements/#cluster-routing-service)
+  * [NGINX Enterprise ingress controller: Patch certificate secret]({{site.baseurl}}/docs/runtime/requirements/#patch-certificate-secret)
+1. If you bypassed installing ingress resources with the `--skip-ingress` flag for ingress controllers not in the supported list, create and register Git integrations using these commands:
+   `cf integration git add default --runtime <runtime_name> --api-url <git_provider_api_url>`
+   `cf integration git register default --runtime <runtime_name> --token <runtime_git_token>`
+
+
+{::nomarkdown}
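+{:/}
+
+For example, a minimal silent install might look like the sketch below. The repository URL is illustrative and `<GIT_RUNTIME_TOKEN>` is a placeholder for your Git runtime token; add any tunnel- or ingress-related flags your setup requires:
+
+```shell
+# Illustrative silent install; replace the repo URL and token with your own values
+cf runtime install \
+  --repo https://github.com/my-org/my-runtime-repo.git \
+  --git-token <GIT_RUNTIME_TOKEN> \
+  --silent
+```
+
+{::nomarkdown}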
            +{:/} + + + +## Hybrid GitOps Runtime installation flags +This section describes the required and optional flags to install a Hybrid GitOps Runtime. +For documentation purposes, the flags are grouped into: +* Runtime flags, relating to Runtime, cluster, and namespace requirements +* Ingress-less flags, for tunnel-based installation +* Ingress-controller flags, for ingress-based installation +* Git provider and repo flags +* Codefresh resource flags + +{::nomarkdown} +
+{:/}
+
+### Runtime flags
+
+**Runtime name**
+Required.
+The Runtime name must start with a lower-case character, and can include up to 62 lower-case characters and numbers.
+* CLI wizard: Add when prompted.
+* Silent install: Add the `--runtime` flag and define the name.
+
+**Namespace resource labels**
+Optional.
+The label of the namespace resource to which you are installing the Hybrid Runtime. Labels are required to identify the networks that need access during installation, as is the case when using service meshes such as Istio.
+
+* CLI wizard and Silent install: Add the `--namespace-labels` flag, and define the labels in `key=value` format. Separate multiple labels with `commas`.
+
+**Kube context**
+Required.
+The cluster defined as the default for `kubectl`. If you have more than one Kube context, the current context is selected by default.
+
+* CLI wizard: Select the Kube context from the list displayed.
+* Silent install: Explicitly specify the Kube context with the `--context` flag.
+
+**Access mode**
+The access mode for the runtime, which can be one of the following:
+* [Tunnel-based]({{site.baseurl}}/docs/installation/runtime-architecture/#tunnel-based-hybrid-gitops-runtime-architecture), for runtimes without ingress controllers. This is the default.
+* [Ingress-based]({{site.baseurl}}/docs/getting-started/architecture/#ingress-based-hybrid-gitops-runtime-architecture), for runtimes with ingress controllers.
+
+
+* CLI wizard: Select the access mode from the list displayed.
+* Silent install:
+  * For tunnel-based, see [Tunnel-based runtime flags](#tunnel-based-runtime-flags)
+  * For ingress-based, add the [Ingress controller flags](#ingress-controller-flags)
+
+  >If you don't specify any flags, tunnel-based access is automatically selected.
+
+**Shared configuration repository**
+The Git repository per Runtime account with shared configuration manifests.
+* CLI wizard and Silent install: Add the `--shared-config-repo` flag and define the path to the shared repo.
+
+{::nomarkdown}
            +{:/} + +### Tunnel-based runtime flags +These flags are required to install tunnel-based Hybrid Runtimes, without an ingress controller. + +**IP allowlist** + +Optional. + +The allowed list of IPs from which to forward requests to the internal customer cluster for ingress-less runtime installations. The allowlist can include IPv4 and IPv6 addresses, with/without subnet and subnet masks. Multiple IPs must be separated by commas. + +When omitted, all incoming requests are authenticated regardless of the IPs from which they originated. + +* CLI wizard and Silent install: Add the `--ips-allow-list` flag, followed by the IP address, or list of comma-separated IPs to define more than one. For example, `--ips-allow-list 77.126.94.70/16,192.168.0.0` + +{::nomarkdown} +
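+{:/}
+
+For example, a tunnel-based silent install that restricts incoming requests to specific networks might look like the sketch below; the repository URL and token are placeholders, and the IPs reuse the example above:
+
+```shell
+# Illustrative tunnel-based install with an IP allowlist (placeholder values)
+cf runtime install \
+  --repo https://github.com/my-org/my-runtime-repo.git \
+  --git-token <GIT_RUNTIME_TOKEN> \
+  --ips-allow-list 77.126.94.70/16,192.168.0.0 \
+  --silent
+```
+
+{::nomarkdown}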
            +{:/} + +### Ingress controller flags + + +**Skip ingress** +Required, if you are using an unsupported ingress controller. +For unsupported ingress controllers, bypass installing ingress resources with the `--skip-ingress` flag. +In this case, after completing the installation, manually configure the cluster's routing service, and create and register Git integrations. See the last step in [Install the Hybrid GitOps Runtime](#install-hybrid-gitops-runtime). + +**Ingress class** +Required. + +* CLI wizard: Select the ingress class for Runtime installation from the list displayed. +* Silent install: Explicitly specify the ingress class through the `--ingress-class` flag. Otherwise, Runtime installation fails. + +**Ingress host** +Required. +The IP address or host name of the ingress controller component. + +* CLI wizard: Automatically selects and displays the host, either from the cluster or the ingress controller associated with the **Ingress class**. +* Silent install: Add the `--ingress-host` flag. If a value is not provided, takes the host from the ingress controller associated with the **Ingress class**. + > Important: For AWS ALB, the ingress host is created post-installation. However, when prompted, add the domain name you will create in `Route 53` as the ingress host. + +**Insecure ingress hosts** +TLS certificates for the ingress host: +If the ingress host does not have a valid TLS certificate, you can continue with the installation in insecure mode, which disables certificate validation. + +* CLI wizard: Automatically detects and prompts you to confirm continuing the installation in insecure mode. +* Silent install: To continue with the installation in insecure mode, add the `--insecure-ingress-host` flag. + +**Internal ingress host** +Optional. +Enforce separation between internal (app-proxy) and external (webhook) communication by adding an internal ingress host for the app-proxy service in the internal network. +For both CLI wizard and Silent install: + +* For new Runtime installations, add the `--internal-ingress-host` flag pointing to the ingress host for `app-proxy`. +* For existing installations, commit changes to the installation repository by modifying the `app-proxy ingress` and `.yaml` + See [(Optional) Internal ingress host configuration for existing Hybrid Runtimes](#optional-internal-ingress-host-configuration-for-existing-hybrid-runtimes). + +{::nomarkdown} +
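+{:/}
+
+For example, an ingress-based silent install for an NGINX ingress controller might look like the sketch below; the repository URL, token, ingress class, and ingress host are placeholders to adjust for your cluster:
+
+```shell
+# Illustrative ingress-based install (placeholder values)
+cf runtime install \
+  --repo https://github.com/my-org/my-runtime-repo.git \
+  --git-token <GIT_RUNTIME_TOKEN> \
+  --ingress-class nginx \
+  --ingress-host https://my-runtime.example.com \
+  --silent
+```
+
+{::nomarkdown}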
            +{:/} + + + +### Git provider and repo flags +The Git provider defined for the Runtime. + +>Because Codefresh creates a [shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) for the Runtimes in your account, the Git provider defined for the first Runtime you install in your account is used for all the other Runtimes in the same account. + +You can define any of the following Git providers: +* GitHub: + * [GitHub](#github) (the default Git provider) + * [GitHub Enterprise](#github-enterprise) +* GitLab: + * [GitLab Cloud](#gitlab-cloud) + * [GitLab Server](#gitlab-server) +* Bitbucket: + * [Bitbucket Cloud](#bitbucket-cloud) + * [Bitbucket Server](#bitbucket-server) + +{::nomarkdown} +
            +{:/} + + + +#### GitHub +GitHub is the default Git provider for Hybrid Runtimes. Being the default provider, for both the CLI wizard and Silent install, you need to provide only the repository URL and the Git runtime token. + +> For the required scopes, see [GitHub and GitHub Enterprise Runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes). + +`--repo --git-token ` + +where: +* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix. Copy the clone URL from your GitHub website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}). + If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during the installation. + + Repo URL format: + `https://github.com//reponame>.git[/subdirectory][?ref=branch]` + where: + * `/` is your username or organization name, followed by the name of the repo, identical to the HTTPS clone URL. For example, `https://github.com/nr-codefresh/codefresh.io.git`. + * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`. + * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`. + + Example: + `https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod` +* `--git-token ` (required), is the Git token authenticating access to the Runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)). + +{::nomarkdown} +
            +{:/} + +#### GitHub Enterprise + +> For the required scopes, see [GitHub and GitHub Enterprise runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes). + + +`--provider github --repo --git-token ` + +where: +* `--provider github` (required), defines GitHub Enterprise as the Git provider for the Runtime and the account. +* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitHub Enterprise website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}). + If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during the installation. + Repo URL format: + + `https://ghe-trial.devops.cf-cd.com//.git[/subdirectory][?ref=branch]` + where: + * `/` is your username or organization name, followed by the name of the repo. For example, `codefresh-io/codefresh.io.git`. + * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`. + * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`. + + Example: + `https://ghe-trial.devops.cf-cd.com/codefresh-io/codefresh.io.git/runtimes/defs?ref=codefresh-prod` +* `--git-token ` (required), is the Git token authenticating access to the Runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)). + + +{::nomarkdown} +
            +{:/} + +#### GitLab Cloud +> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes). + + +`--provider gitlab --repo --git-token ` + +where: +* `--provider gitlab` (required), defines GitLab Cloud as the Git provider for the Runtime and the account. +* `--repo ` (required), is the `HTTPS` clone URL of the Git project for the Runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitLab website. + If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during the installation. + + > Important: You must create the group with access to the project prior to the installation. + + Repo URL format: + + `https://gitlab.com//.git[/subdirectory][?ref=branch]` + where: + * `` is either your username, or if your project is within a group, the front-slash separated path to the project. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy) + * `` is the name of the project. For example, `codefresh`. + * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`. + * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`. + + Examples: + `https://gitlab.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner) + + `https://gitlab.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy) + +* `--git-token ` (required), is the Git token authenticating access to the Runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)). + + +{::nomarkdown} +
            +{:/} + + +#### GitLab Server + +> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes). + +`--provider gitlab --repo --git-token ` + +where: +* `--provider gitlab` (required), defines GitLab Server as the Git provider for the Runtime and the account. +* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix. + If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh creates the project during the installation. + + > Important: You must create the group with access to the project prior to the installation. + + Repo URL format: + `https://gitlab-onprem.devops.cf-cd.com//.git[/subdirectory][?ref=branch]` + where: + * `` is your username, or if the project is within a group or groups, the name of the group. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy) + * `` is the name of the project. For example, `codefresh`. + * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`. + * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`. + + Examples: + `https://gitlab-onprem.devops.cf-cd.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner) + + `https://gitlab-onprem.devops.cf-cd.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy) + +* `--git-token ` (required), is the Git token authenticating access to the Runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)). + + +{::nomarkdown} +
+{:/}
+
+#### Bitbucket Cloud
+> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes).
+
+
+`--provider bitbucket --repo <repo_url> --git-user <git_username> --git-token <git_token>`
+
+where:
+* `--provider bitbucket` (required), defines Bitbucket Cloud as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix.
+  If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during Runtime installation.
+  >Important: Remove the username, including the @ character, from the copied URL.
+
+  Repo URL format:
+
+  `https://bitbucket.org/<workspace_id>/<repo_name>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<workspace_id>` is your workspace ID. For example, `nr-codefresh`.
+  * `<repo_name>` is the name of the repository. For example, `codefresh`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Example:
+  `https://bitbucket.org/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod`
+* `--git-user <git_username>` (required), is your username for the Bitbucket Cloud account.
+* `--git-token <git_token>` (required), is the Git token authenticating access to the Runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)).
+
+
+{::nomarkdown}
+{:/}
+
+#### Bitbucket Server
+
+> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes).
+
+
+`--provider bitbucket-server --repo <repo_url> --git-user <git_username> --git-token <git_token>`
+
+where:
+* `--provider bitbucket-server` (required), defines Bitbucket Server as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix.
+  If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh then creates the repository during the installation.
+  >Important: Remove the username, including the @ character, from the copied URL.
+
+  Repo URL format:
+
+  `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/<owner_or_organization>/<repo_name>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<owner_or_organization>` is your username or organization name. For example, `codefresh-io`.
+  * `<repo_name>` is the name of the repo. For example, `codefresh`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Example:
+  `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/codefresh-io/codefresh.git/runtimes/defs?ref=codefresh-prod`
+* `--git-user <git_username>` (required), is your username for the Bitbucket Server account.
+* `--git-token <git_token>` (required), is the Git token authenticating access to the Runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)).
+
+{::nomarkdown}

            +{:/} + +### Codefresh resource flags +**Codefresh demo resources** +Optional. +Install demo pipelines to use as a starting point to create your own GitOps pipelines. We recommend installing the demo resources as these are used in our quick start tutorials. + +* Silent install: Add the `--demo-resources` flag, and define its value as `true` (default), or `false`. For example, `--demo-resources=true` + +**Insecure flag** +For _on-premises installations_, if the Ingress controller does not have a valid SSL certificate, to continue with the installation, add the `--insecure` flag to the installation command. + +{::nomarkdown} +

            +{:/} + + + + + + + +## (Optional) Internal ingress host configuration for existing Hybrid GitOps Runtimes +If you already have provisioned Hybrid GitOps Runtimes, to use an internal ingress host for app-proxy communication and an external ingress host to handle webhooks, change the specs for the `Ingress` and `Runtime` resources in the Runtime installation repository. Use the examples as guidelines. + +`/apps/app-proxy/overlays//ingress.yaml`: change `host` + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: codefresh-cap-app-proxy + namespace: codefresh #replace with your runtime name +spec: + ingressClassName: nginx + rules: + - host: my-internal-ingress-host # replace with the internal ingress host for app-proxy + http: + paths: + - backend: + service: + name: cap-app-proxy + port: + number: 3017 + path: /app-proxy/ + pathType: Prefix +``` + +`..//bootstrap/.yaml`: add `internalIngressHost` + +```yaml +apiVersion: v1 +data: + base-url: https://g.codefresh.io + runtime: | + apiVersion: codefresh.io/v1alpha1 + kind: Runtime + metadata: + creationTimestamp: null + name: codefresh #replace with your runtime name + namespace: codefresh #replace with your runtime name + spec: + bootstrapSpecifier: github.com/codefresh-io/cli-v2/manifests/argo-cd + cluster: https://7DD8390300DCEFDAF87DC5C587EC388C.gr7.us-east-1.eks.amazonaws.com + components: + - isInternal: false + name: events + type: kustomize + url: github.com/codefresh-io/cli-v2/manifests/argo-events + wait: true + - isInternal: false + name: rollouts + type: kustomize + url: github.com/codefresh-io/cli-v2/manifests/argo-rollouts + wait: false + - isInternal: false + name: workflows + type: kustomize + url: github.com/codefresh-io/cli-v2/manifests/argo-workflows + wait: false + - isInternal: false + name: app-proxy + type: kustomize + url: github.com/codefresh-io/cli-v2/manifests/app-proxy + wait: false + defVersion: 1.0.1 + ingressClassName: nginx + ingressController: k8s.io/ingress-nginx + ingressHost: https://support.cf.com/ + internalIngressHost: https://my-internal-ingress-host # add this line and replace my-internal-ingress-host with your internal ingress host + repo: https://github.com/NimRegev/my-codefresh.git + version: 99.99.99 +``` + + +## Related articles +[Add external clusters to Hybrid and Hosted Runtimes]({{site.baseurl}}/docs/installation/managed-cluster/) +[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/) +[Add Git Sources to runtimes]({{site.baseurl}}/docs/installation/git-sources/) +[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) +[Troubleshoot Hybrid Runtime installation]({{site.baseurl}}/installation/troubleshooting/runtime-issues/) diff --git a/_docs/runtime/managed-cluster.md b/_docs/installation/gitops/managed-cluster.md similarity index 65% rename from _docs/runtime/managed-cluster.md rename to _docs/installation/gitops/managed-cluster.md index 25ae4546e..bb5a756b4 100644 --- a/_docs/runtime/managed-cluster.md +++ b/_docs/installation/gitops/managed-cluster.md @@ -1,42 +1,43 @@ --- -title: "Add external clusters to runtimes" -description: "" -group: runtime +title: "Add external clusters to GitOps Runtimes" +description: "Manage multiple remote clusters with single GitOps Runtime" +group: installation +sub_group: gitops toc: true --- -Register external clusters to provisioned hybrid or hosted runtimes in Codefresh. 
Once you add an external cluster, you can deploy applications to that cluster without having to install Argo CD in order to do so. External clusters allow you to manage multiple clusters through a single runtime. +Register external clusters to provisioned Hybrid or Hosted GitOps Runtimes in Codefresh. Once you add an external cluster, you can deploy applications to that cluster without having to install Argo CD on the clusters in order to do so. Manage multiple external clusters through a single Runtime. -When you add an external cluster to a provisioned runtime, the cluster is registered as a managed cluster. A managed cluster is treated as any other managed K8s resource, meaning that you can monitor its health and sync status, deploy applications on the cluster and view information in the Applications dashboard, and remove the cluster from the runtime's managed list. +When you add an external cluster to a provisioned GitOps Runtime, the cluster is registered as a managed cluster. A managed cluster is treated as any other managed K8s resource, meaning that you can monitor its health and sync status, deploy applications to it, view information in the Applications dashboard, and remove the cluster from the Runtime's managed list. Add managed clusters through: -* Codefresh CLI +* GitOps CLI * Kustomize -Adding a managed cluster via Codefresh ensures that Codefresh applies the required RBAC resources (`ServiceAccount`, `ClusterRole` and `ClusterRoleBinding`) to the target cluster, creates a `Job` that updates the selected runtime with the information, registers the cluster in Argo CD as a managed cluster, and updates the platform with the new cluster information. +Adding a managed cluster via Codefresh ensures that Codefresh applies the required RBAC resources (`ServiceAccount`, `ClusterRole` and `ClusterRoleBinding`) to the target cluster, creates a `Job` that updates the selected Runtime with the information, registers the cluster in Argo CD as a managed cluster, and updates the platform with the new cluster information. -### Add a managed cluster with Codefresh CLI -Add an external cluster to a provisioned runtime through the Codefresh CLI. When adding the cluster, you can also add labels and annotations to the cluster, which are added to the cluster secret created by Argo CD. +## Add a managed cluster with GitOps CLI +Add an external cluster to a provisioned GitOps Runtime through the GitOps CLI. When adding the cluster, you can also add labels and annotations to the cluster, which are added to the cluster secret created by Argo CD. Optionally, to first generate the YAML manifests, and then manually apply them, use the `dry-run` flag in the CLI. **Before you begin** - -* For _hosted_ runtimes: [Configure access to these IP addresses]({{site.baseurl}}/docs/administration/platform-ip-addresses/) +* For _Hosted GitOps_ Runtimes: [Configure access to these IP addresses]({{site.baseurl}}/docs/administration/platform-ip-addresses/) * Verify that: - * Your Git personal access token is valid and has the correct permissions - * You have installed the latest version of the Codefresh CLI + * Your Git personal access token is valid and has the [required scopes]({{site.baseurl}}/docs/reference/git-tokens) + * You have installed the [latest version of the Codefresh CLI]({{site.baseurl}}/docs/clients/#upgrade-gitops-cli) **How to** -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. 
From either the **Topology** or **List** views, select the runtime to which to add the cluster. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. From either the **Topology** or **List** views, select the Runtime to which to add the cluster. 1. Topology View: Select {::nomarkdown}{:/}. List View: Select the **Managed Clusters** tab, and then select **+ Add Cluster**. 1. In the Add Managed Cluster panel, copy and run the command: - `cf cluster add [--labels label-key=label-value] [--annotations annotation-key=annotation-value][--dry-run]` + `cf cluster add [runtime-name] [--labels label-key=label-value] [--annotations annotation-key=annotation-value][--dry-run]` where: + * `runtime-name` is the name of the Runtime to which to add the cluster. * `--labels` is optional, and required to add labels to the cluster. When defined, add a label in the format `label-key=label-value`. Separate multiple labels with `commas`. * `--annotations` is optional, and required to add annotations to the cluster. When defined, add an annotation in the format `annotation-key=annotation-value`. Separate multiple annotations with `commas`. * `--dry-run` is optional, and required if you want to generate a list of YAML manifests that you can redirect and apply manually with `kubectl`. @@ -54,7 +55,7 @@ Optionally, to first generate the YAML manifests, and then manually apply them, {:start="5"} 1. If you used `dry-run`, apply the generated manifests to the same target cluster on which you ran the command. - Here is an example of the YAML manifest generated with the `--dry-run` flag. Note that there are placeholders in the example, which are replaced with the actual values with `--dry-run`. + Here is an example of the YAML manifest generated with the `--dry-run` flag. Note that the example has placeholders, which are replaced with the actual values during the `--dry-run`. ```yaml @@ -177,9 +178,9 @@ spec: ``` -The new cluster is registered to the runtime as a managed cluster. +The new cluster is registered to the GitOps Runtime as a managed cluster. -### Add a managed cluster with Kustomize +## Add a managed cluster with Kustomize Create a `kustomization.yaml` file with the information shown in the example below, and run `kustomize build` on it. ```yaml @@ -222,16 +223,20 @@ resources: ``` -### Work with managed clusters -Work with managed clusters in hybrid or hosted runtimes in either the Topology or List runtime views. For information on runtime views, see [Runtime views]({{site.baseurl}}/docs/runtime/runtime-views). -As the cluster is managed through the runtime, updates to the runtime automatically updates the components on all the managed clusters that include it. +## Work with managed clusters +Work with managed clusters in either the Topology or List Runtime views. For information on Runtime views, see [Runtime views]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/#gitops-runtime-views). +As the cluster is managed through the Runtime, updates to the Runtime automatically updates the components on all the managed clusters that include it. View connection status for the managed cluster, and health and sync errors. Health and sync errors are flagged by the error notification in the toolbar, and visually flagged in the List and Topology views. 
-#### Install Argo Rollouts -Install Argo Rollouts directly from Codefresh with a single click to visualize rollout progress in the [Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/). If Argo Rollouts has not been installed, an **Install Argo Rollouts** button is displayed on selecting the managed cluster. +### Install Argo Rollouts +Applications with `rollout` resources need Argo Rollouts on the target cluster, both to visualize rollouts in the Applications dashboard and control rollout steps with the Rollout Player. +If Argo Rollouts has not been installed on the target cluster, it displays **Install Argo Rollouts** button. + +Install Argo Rollouts with a single click to execute rollout instructions, deploy the application, and visualize rollout progress in the [Applications dashboard]({{site.baseurl}}/docs/deployment/gitops/applications-dashboard/). + -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. 1. Select **Topology View**. 1. Select the target cluster, and then select **+ Install Argo Rollouts**. @@ -246,16 +251,16 @@ Install Argo Rollouts directly from Codefresh with a single click to visualize r %} -#### Remove a managed cluster from the Codefresh UI -Remove a cluster from the runtime's list of managed clusters from the Codefresh UI. +### Remove a managed cluster from the Codefresh UI +Remove a cluster from the Runtime's list of managed clusters from the Codefresh UI. > You can also remove it through the CLI. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. 1. Select either the **Topology View** or the **List View** tabs. 1. Do one of the following: - * In the Topology View, select the cluster node from the runtime it is registered to. - * In the List View, select the runtime, and then select the **Managed Clusters** tab. + * In the Topology View, select the cluster node from the Runtime it is registered to. + * In the List View, select the Runtime, and then select the **Managed Clusters** tab. 1. Select the three dots next to the cluster name, and then select **Uninstall** (Topology View) or **Remove** (List View). {% include @@ -269,8 +274,8 @@ Remove a cluster from the runtime's list of managed clusters from the Codefresh %} -#### Remove a managed cluster through the Codefresh CLI -Remove a cluster from the list managed by the runtime, through the CLI. +### Remove a managed cluster through the GitOps CLI +Remove a cluster from the list managed by the GitOps Runtime, through the GitOps CLI. * Run: `cf cluster remove --server-url ` @@ -279,7 +284,6 @@ Remove a cluster from the list managed by the runtime, through the CLI. `` is the URL of the server on which the managed cluster is installed. 
-### Related articles -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Manage provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[(Hybrid) Monitor provisioned runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) \ No newline at end of file +## Related articles +[Add Git Sources to GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/git-sources/) +[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/monitor-manage-runtimes/) diff --git a/_docs/installation/gitops/monitor-manage-runtimes.md b/_docs/installation/gitops/monitor-manage-runtimes.md new file mode 100644 index 000000000..d67609286 --- /dev/null +++ b/_docs/installation/gitops/monitor-manage-runtimes.md @@ -0,0 +1,642 @@ +--- +title: "Monitoring & managing GitOps Runtimes" +description: "Optimize GitOps Runtimes" +group: runtime +sub_group: gitops +redirect_from: + - /monitor-manage-runtimes/ + - /monitor-manage-runtimes +toc: true +--- + + +The **Runtimes** page displays the provisioned GitOps Runtimes in your account, both Hybrid, and the Hosted Runtime if you have one. + +View Runtime components and information in List or Topology view formats to manage and monitor them. + +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view.png" + url="/images/runtime/runtime-list-view.png" + alt="Runtime List View" + caption="Runtime List View" + max-width="70%" +%} + +Manage provisioned GitOps Runtimes: +* [Add managed clusters to GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/managed-cluster/) +* [Add and manage Git Sources for GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/git-sources/) +* [Upgrade GitOps CLI](#hybrid-gitops-upgrade-provisioned-runtimes) +* Upgrade Hybrid GitOps Runtimes +* Uninstall GitOps Runtimes + + + +Monitor provisioned GitOps Runtimes for security, health, and sync errors: + +* (Hybrid and Hosted) View/download logs for Runtimes and for Runtime components +* (Hybrid) Restore provisioned Runtimes +* (Hybrid) Configure browsers to allow access to insecure Runtimes +* (Hybrid) Monitor notifications in the Activity Log + + +> Unless specified otherwise, all options are common to both types of GitOps Runtimes. If an option is valid only for Hybrid GitOps, it is indicated as such. + + +## GitOps Runtime views + +View provisioned GitOps Runtimes in List or Topology view formats. + +* List view: The default view, displays the list of provisioned Runtimes, the clusters managed by them, and Git Sources associated with them. +* Topology view: Displays a hierarchical view of Runtimes and the clusters managed by them, with health and sync status of each cluster. + +### List view + +The List view is a grid-view of the provisioned Runtimes. + +Here is an example of the List view for runtimes. +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view.png" + url="/images/runtime/runtime-list-view.png" + alt="Runtime List View" + caption="Runtime List View" + max-width="70%" +%} + +Here is a description of the information in the List View. + +{: .table .table-bordered .table-hover} +| List View Item| Description | +| -------------- | ---------------- | +|**Name**| The name of the provisioned GitOps Runtime. | +|**Type**| The type of GitOps Runtime provisioned, and can be **Hybrid** or **Hosted**. | +|**Cluster/Namespace**| The K8s API server endpoint, as well as the namespace with the cluster. 
|
+|**Modules**| The modules installed based on the type of provisioned Runtime. Hybrid Runtimes include CI and CD Ops modules. Hosted Runtimes include CD Ops. |
+|**Managed Cluster**| The number of managed clusters, if any, for the runtime. To view the list of managed clusters, select the runtime, and then the **Managed Clusters** tab. To work with managed clusters, see [Adding external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster).|
+|**Version**| The version of the runtime currently installed. **Update Available!** indicates there are later versions of the runtime. To see all the commits to the runtime, mouse over **Update Available!**, and select **View Complete Change Log**. |
+|**Last Updated**| The most recent update information from the runtime to the Codefresh platform. Updates are sent to the platform typically every few minutes. Longer update intervals may indicate networking issues.|
+|**Sync Status**| The health and sync status of the runtime or cluster. {::nomarkdown}
            • indicates health or sync errors in the runtime, or a managed cluster if one was added to the runtime.
              The runtime name is colored red.
            • indicates that the runtime is being synced to the cluster on which it is provisioned.
{:/} |
+
+### Topology view
+
+A hierarchical visualization of the provisioned Runtimes. The Topology view makes it easy to identify key information such as versions, and health and sync status, for both the provisioned Runtime and the clusters managed by it.
+Here is an example of the Topology view for Runtimes.
+ {% include
+ image.html
+ lightbox="true"
+ file="/images/runtime/runtime-topology-view.png"
+ url="/images/runtime/runtime-topology-view.png"
+ alt="Runtime Topology View"
+ caption="Runtime Topology View"
+ max-width="60%"
+%}
+
+Here is a description of the information in the Topology view.
+
+{: .table .table-bordered .table-hover}
+| Topology View Item | Description |
+| ------------------------| ---------------- |
+|**Runtime** | ![](../../../images/icons/codefresh-runtime.png?display=inline-block) the provisioned Runtime. Hybrid Runtimes display the name of the K8s API server endpoint with the cluster. Hosted Runtimes display 'hosted'. |
+|**Cluster** | The local cluster, and managed clusters if any, for the Runtime. {::nomarkdown}
            • indicates the local cluster, always displayed as `in-cluster`. The in-cluster server URL is always set to `https://kubernetes.default.svc/`.
            • indicates a managed cluster.
            • select to add a new managed cluster.
            {:/} To view cluster components, select the cluster. To add and work with managed clusters, see [Adding external clusters to runtimes]({{site.baseurl}}/docs/installation/gitops/managed-cluster). | +|**Health/Sync status** |The health and sync status of the Runtime or cluster. {::nomarkdown}
            • indicates health or sync errors in the Runtime, or a managed cluster if one was added to the runtime.
              The runtime or cluster node is bordered in red and the name is colored red.
            • indicates that the Runtime is being synced to the cluster on which it is provisioned.
            {:/} | +|**Search and View options** | {::nomarkdown}
            • Find a Runtime or its clusters by typing part of the Runtime/cluster name, and then navigate to the entries found.
            • Topology view options: Resize to window, zoom in, zoom out, full screen view.
            {:/}| + +## Managing provisioned GitOps Runtimes +* [Reset shared configuration repository for GitOps Runtimes](#reset-shared-configuration-repository-for-gitops-runtimes) +* [(Hybrid GitOps) Upgrade GitOps CLI](#hybrid-gitops-upgrade-gitops-cli) +* [(Hybrid GitOps) Upgrade provisioned Runtimes](#hybrid-gitops-upgrade-provisioned-runtimes) +* [Uninstall provisioned GitOps Runtimes](#uninstall-provisioned-gitops-runtimes) +* [Update Git tokens for Runtimes](#update-git-tokens-for-runtimes) + +### Reset shared configuration repository for GitOps Runtimes +Codefresh creates the [shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration) when you install the first hybrid or hosted GitOps runtime for your account, and uses it for all runtimes you add to the same account. + +If needed, you can reset the location of the shared configuration repository in your account and re-initialize it. For example, when moving from evaluation to production. +Uninstall all the existing runtimes in your account, and then run the reset command. On the next installation, Codefresh re-initializes the shared configuration repo. + +**Before you begin** +[Uninstall every runtime in the account](#uninstall-provisioned-gitops-runtimes) + +**How to** +* Run: + `cf config --reset-shared-config-repo` + +### (Hybrid GitOps) Upgrade GitOps CLI +Upgrade the CLI to the latest version to prevent Runtime installation errors. + +1. Check the version of the CLI you have installed: + `cf version` +1. Compare with the [latest version](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} released by Codefresh. +1. Select and run the appropriate command: + +{: .table .table-bordered .table-hover} +| Download mode | OS | Commands | +| -------------- | ----------| ----------| +| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| +| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | +| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | +| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| +| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`| + +### (Hybrid GitOps) Upgrade provisioned Runtimes + +Upgrade provisioned Hybrid Runtimes to install critical security updates or the latest versions of all components. Upgrade a provisioned Hybrid Runtime by running a silent upgrade or through the GitOps CLI wizard. +If you have managed clusters for Hybrid GitOps, upgrading the Runtime automatically updates runtime components within the managed cluster as well. + +> When there are security updates, the UI displays the alert, _At least one runtime requires a security update_. The Version column displays an _Update Required!_ notification. + +> If you have older Hybrid Runtime versions, upgrade to manually define or create the shared configuration repo for your account. See [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/). 
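+For reference, a silent upgrade that also sets the shared configuration repo explicitly might look like the sketch below. This is an illustration only: the Runtime name, Git token, and repository path are placeholders, and the sketch assumes the Runtime name is passed as an argument, as in the upgrade command copied from the UI.
+
+```
+# Sketch only: replace <runtime-name>, <git-token>, and <shared-config-repo> with your own values
+cf runtime upgrade <runtime-name> \
+  --git-token <git-token> \
+  --shared-config-repo <shared-config-repo> \
+  --silent
+```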
+ + +**Before you begin** +For both silent or CLI-wizard based upgrades, make sure you have: + +* The latest version of the Codefresh CLI +* A valid Git token with [the required scopes]({{site.baseurl}}/docs/reference/git-tokens) + +**Silent upgrade** + +* Pass the mandatory flags in the upgrade command: + + `cf runtime upgrade --git-token --silent` + where: + `` is a valid Git token with the correct scopes. + +**CLI wizard-based upgrade** + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. Switch to either the **List View** or to the **Topology View**. +1. **List view**: + * Select the Runtime name. + * To see all the commits to the Runtime, in the Version column, mouse over **Update Available!**, and select **View Complete Change Log**. + * On the top-right, select **Upgrade**. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view-upgrade.png" + url="/images/runtime/runtime-list-view-upgrade.png" + alt="List View: Upgrade runtime option" + caption="List View: Upgrade runtime option" + max-width="30%" + %} + + **Topology view**: + Select the Runtime cluster, and from the panel, select the three dots and then select **Upgrade Runtime**. + {% include + image.html + lightbox="true" + file="/images/runtime/runtiime-topology-upgrade.png" + url="/images/runtime/runtiime-topology-upgrade.png" + alt="Topology View: Upgrade runtime option" + caption="Topology View: Upgrade runtime option" + max-width="30%" +%} + +{:start="4"} + +1. If you have already installed the GitOps CLI, in the Install Upgrades panel, copy the upgrade command. + + {% include + image.html + lightbox="true" + file="/images/runtime/install-upgrades.png" + url="/images/runtime/install-upgrades.png" + alt="Upgrade runtime" + caption="Upgrade runtime panel" + max-width="30%" +%} + +{:start="5"} +1. In your terminal, paste the command, and do the following: + * Update the Git token value. + * To manually define the shared configuration repo, add the `--shared-config-repo` flag with the path to the repo. +1. Confirm to start the upgrade. + + + + + + +### Uninstall provisioned GitOps Runtimes + +Uninstall provisioned GitOps Runtimes that are not in use, through a silent uninstall or through the GitOps CLI wizard. +> Uninstalling a Runtime removes the Git Sources and managed clusters associated with it. + +**Before you begin** +For both types of uninstalls, make sure you have: + +* The latest version of the GitOps CLI +* A valid runtime Git token +* The Kube context from which to uninstall the provisioned Runtime + +**Silent uninstall** +Pass the mandatory flags in the uninstall command: + `cf runtime uninstall --git-token --silent` + where: + `--git-token` is a valid runtime token with the `repo` and `admin-repo.hook` scopes. + +**GitOps CLI wizard uninstall** + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. Switch to either the **List View** or to the **Topology View**. +1. **List view**: On the top-right, select the three dots and then select **Uninstall**. 
+ + {% include + image.html + lightbox="true" + file="/images/runtime/uninstall-location.png" + url="/images/runtime/uninstall-location.png" + alt="List View: Uninstall runtime option" + caption="List View: Uninstall runtime option" + max-width="30%" +%} + +**Topology view**: Select the Runtime node, and from the panel, select the three dots and then select **Uninstall Runtime**. + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-topology-uninstall.png" + url="/images/runtime/runtime-topology-uninstall.png" + alt="Topology View: Uninstall runtime option" + caption="Topology View: Uninstall runtime option" + max-width="30%" +%} + +{:start="4"} + +1. If you already have the latest version of the GitOps CLI, in the Uninstall Codefresh Runtime panel, copy the uninstall command. + + {% include + image.html + lightbox="true" + file="/images/runtime/uninstall.png" + url="/images/runtime/uninstall.png" + alt="Uninstall Codefresh runtime" + caption="Uninstall Codefresh runtime" + max-width="40%" +%} + +{:start="5"} + +1. In your terminal, paste the command, and update the Git token value. +1. Select the Kube context from which to uninstall the Runtime, and then confirm the uninstall. +1. If you get errors, run the uninstall command again, with the `--force` flag. + + + +### Update Git tokens for Runtimes + +Provisioned Runtimes require valid Git tokens at all times to authenticate Git actions by you as a user. +>These tokens are specific to the user, and the same can be used for multiple runtimes. + +There are two different situations when you need to update Git tokens: +* Update invalid, revoked, or expired tokens: Codefresh automatically flags Runtimes with such tokens. It is mandatory to update the Git tokens to continue working with the platform. +* Update valid tokens: Optional. You may want to update Git tokens, even valid ones, by deleting the existing token and replacing it with a new token. + +The methods for updating any Git token are the same regardless of the reason for the update: +* OAuth2 authorization, if your admin has registered an OAuth Application for Codefresh +* Git access token authentication, by generating a personal access token in your Git provider account with the correct scopes + +**Before you begin** +* To authenticate through a Git access token, make sure your token is valid and has [the required scopes]({{site.baseurl}}/docs/reference/git-tokens) + +**How to** +1. Do one of the following: + * If you see a notification in the Codefresh UI about invalid Runtime tokens, click **[Update Token]**. + The GitOps Runtimes page shows runtimes with invalid tokens prefixed by the key icon. Mouse over shows invalid token. + * To update an existing token, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. Select the GitOps Runtime for which to update the Git token. +1. From the context menu with the additional actions at the top-right, select **Update Git Runtime token**. + + {% include + image.html + lightbox="true" + file="/images/runtime/update-git-runtime-token.png" + url="/images/runtime/update-git-runtime-token.png" + alt="Update Git runtime token option" + caption="Update Git runtime token option" + max-width="40%" +%} + +{:start="4"} +1. Do one of the following: + * If your admin has set up OAuth access, click **Authorize Access to Git Provider**. Go to _step 5_. + * Alternatively, authenticate with an access token from your Git provider. Go to _step 6_. + +{:start="5"} +1. 
For OAuth2 authorization: + > If the application is not registered, you get an error. Contact your admin for help. + * Enter your credentials, and select **Sign In**. + * If required, for example if two-factor authentication is configured, complete the verification. + + {% include + image.html + lightbox="true" + file="/images/administration/user-settings/oauth-user-authentication.png" + url="/images/administration/user-settings/oauth-user-authentication.png" + alt="Authorizing access with OAuth2" + caption="Authorizing access with OAuth2" + max-width="30%" + %} + +{:start="6"} +1. For Git token authentication, expand **Advanced authorization options**, and then paste the generated token in the **Git runtime token** field. + +1. Click **Update Token**. + +## Monitoring GitOps Runtimes +* [View/download logs to troubleshoot GitOps Runtimes](#viewdownload-logs-to-troubleshoot-gitops-runtimes) +* [(Hybrid GitOps) Restoring provisioned Runtimes](#hybrid-gitops-restoring-provisioned-runtimes) +* [(Hybrid GitOps) Configure browser to allow insecure Runtimes](#hybrid-gitops-configure-browser-to-allow-insecure-runtimes) +* [(Hybrid GitOps) View notifications in Activity Log](#hybrid-gitops-view-notifications-in-activity-log) +* [(Hybrid GitOps) Troubleshoot health and sync errors for Runtimes](#hybrid-gitops-troubleshoot-health-and-sync-errors-for-runtimes) + +### View/download logs to troubleshoot GitOps Runtimes +Logs are available for completed Runtimes, both for the Runtime itself and for individual Runtime components. Download log files for offline viewing and analysis, or view online logs for a Runtime component and download them if needed for offline analysis. Online logs support free-text search, search-result navigation, and line-wrap for enhanced readability. + +Log files include events from the date of the application launch, with the newest events listed first. + +{::nomarkdown} +

            +{:/} + +#### Download logs for GitOps Runtimes +Download the log file for a Runtime. The Runtime log is downloaded as a `.tar.gz` file, which contains the individual log files for each runtime component. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. If needed, switch to **List View**, and then select the runtime for which to download logs. +1. From the context menu, select **Download All Logs**. + The log file is downloaded to the Downloads folder or the folder designated for downloads, with the filename, `.tar.gz`. For example, `codefreshv2-production2.tar.gz`. + + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-download-all.png" + url="/images/runtime/runtime-logs-download-all.png" + alt="Download logs for selected runtime" + caption="Download logs for selected runtime" + max-width="40%" +%} + + +{:start="4"} +1. To view the log files of the individual components, unzip the file. + Here is an example of the folder with the individual logs. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-folder-view.png" + url="/images/runtime/runtime-logs-folder-view.png" + alt="Individual log files in folder" + caption="Individual log files in folder" + max-width="50%" +%} + +{:start="5"} +1. Open a log file with the text editor of your choice. + +{::nomarkdown} +

            +{:/} + +#### View/download logs for Runtime components +View online logs for any Runtime component, and if needed, download the log file for offline viewing and analysis. + +Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events, from the application launch to the date and time of download. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, expand Runtimes in the sidebar, and select [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. If needed, switch to **List View**, and then select the Runtime. +1. Select the Runtime component and then select **View Logs**. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-view-component.png" + url="/images/runtime/runtime-logs-view-component.png" + alt="View log option for individual runtime component" + caption="View log option for individual runtime component" + max-width="40%" +%} + + +{:start="4"} +1. Do the following: + * Search by free-text for any string, and click the next and previous buttons to navigate between the search results. + * To switch on line-wrap for readability, click **Wrap**. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-screen-view.png" + url="/images/runtime/runtime-logs-screen-view.png" + alt="Runtime component log example" + caption="Runtime component log example" + max-width="50%" +%} + +{:start="5"} +1. To download the log, click **Download**. + The file is downloaded as `.log`. + +### (Hybrid GitOps) Restoring provisioned Runtimes + +In case of cluster failure, restore the provisioned Hybrid Runtime from the existing runtime installation repository. +For partial or complete cluster failures, you can restore the Runtime to either the failed cluster or to a different cluster. +Restoring the provisioned Runtime reinstalls it, leveraging the resources in the existing Runtime repo. + +Restoring the runtime: +* Applies `argo-cd` from the installation manifests in your repo to your cluster +* Associates `argo-cd` with the existing installation repo +* Applies the Runtime and `argo-cd` secrets to the cluster +* Updates the Runtime config map (`.yaml` in the `bootstrap` directory) with the new cluster configuration for these fields: + `cluster` + `ingressClassName` + `ingressController` + `ingressHost` + +{::nomarkdown} +

            +{:/} + +#### Restore a Hybrid Runtime +Reinstall the Hybrid Runtime from the existing installation repository to restore it to the same or a different cluster. + +**Before you begin** + +* Have the following information handy: + > All values must be the identical to the Runtime to be restored. + * Runtime name + * Repository URL + * Codefresh context + * Kube context: Required if you are restoring to the same cluster + +**How to** + +1. Run: + `cf runtime install --from-repo` +1. Provide the relevant values when prompted. +1. If you are performing the runtime recovery in a different cluster, verify the ingress resource configuration for `app-proxy`, `workflows`, and `default-git-source`. + If the health status remains as `Progressing`, do the following: + + * In the Runtime installation repo, check if the `ingress.yaml` files for the `app-proxy` and `workflows` are configured with the correct `host` and `ingressClassName`: + + `apps/app-proxy/overlays//ingress.yaml` + `apps/workflows/overlays//ingress.yaml` + + * In the Git Source repository, check the `host` and `ingressClassName` in `cdp-default-git-source.ingress.yaml`: + + `resources_/cdp-default-git-source.ingress.yaml` + + See the [example](#ingress-example) below. + +{:start="4"} +1. If you have managed clusters registered to the hybrid runtime you are restoring, reconnect them. + Run the command and follow the instructions in the wizard: + `cf cluster add` + +1. Verify that you have a registered Git integration: + `cf integration git list --runtime ` + +1. If needed, create a new Git integration: + `cf integration git add default --runtime --provider github --api-url https://api.github.com` + +{::nomarkdown} +

            +{:/} + +#### Ingress example +This is an example of the `ingress.yaml` for `workflows`. + + ```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + ingress.kubernetes.io/protocol: https + ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/backend-protocol: https + nginx.ingress.kubernetes.io/rewrite-target: /$2 + creationTimestamp: null + name: runtime-name-workflows-ingress + namespace: runtime-name +spec: + ingressClassName: nginx + rules: + - host: your-ingress-host.com + http: + paths: + - backend: + service: + name: argo-server + port: + number: 2746 + path: /workflows(/|$)(.*) + pathType: ImplementationSpecific +status: + loadBalancer: {} +``` + + +### (Hybrid GitOps) Configure browser to allow insecure Runtimes + +If at least one of your Hybrid Runtimes was installed in insecure mode (without an SSL certificate for the ingress controller from a CA), the UI alerts you that _At least one runtime was installed in insecure mode_. +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-insecure-alert.png" + url="/images/runtime/runtime-insecure-alert.png" + alt="Insecure runtime installation alert" + caption="Insecure runtime installation alert" + max-width="100%" +%} + +All you need to do is to configure the browser to trust the URL and receive content. + +1. Select **View Runtimes** to the right of the alert. + You are taken to the Runtimes page, where you can see insecure Runtimes tagged as **Allow Insecure**. + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-insecure-steps.png" + url="/images/runtime/runtime-insecure-steps.png" + alt="Insecure runtimes in Runtime page" + caption="Insecure runtimes in Runtime page" + max-width="40%" +%} +{:start="2"} +1. For _every_ insecure Runtime, select **Allow Insecure**, and when the browser prompts you to allow access, do as relevant: + +* Chrome: Click **Advanced** and then **Proceed to site**. +* Firefox: Click **Advanced** and then **Accept the risk and continue**. +* Safari: Click **Show Certificate**, and then select **Always allow content from site**. +* Edge: Click **Advanced**, and then select **Continue to site(unsafe)**. + +### (Hybrid GitOps) View notifications in Activity Log + +The Activity Log is a quick way to monitor notifications for Runtime events such as upgrades. A pull-down panel in the Codefresh toolbar, the Activity Log shows ongoing, success, and error notifications, sorted by date, starting with today's date. + +1. In the Codefresh UI, on the top-right of the toolbar, select ![](/images/pipeline/monitoring/pipeline-activity-log-toolbar.png?display=inline-block) **Activity Log**. +1. To see notifications for provisioned Runtimes, filter by **Runtime**. + + {% include image.html + lightbox="true" + file="/images/runtime/runtime-activity-log.png" + url="/images/runtime/runtime-activity-log.png" + alt="Activity Log filtered by Runtime events" + caption="Activity Log filtered by Runtime events" + max-width="30%" + %} + +{:start="3"} + +1. To see more information on an error, select the **+** sign. + +### (Hybrid GitOps) Troubleshoot health and sync errors for Runtimes +The ![](/images/icons/error.png?display=inline-block) icon with the Runtime in red indicates either health or sync errors. + +**Health errors** +Health errors are generated by Argo CD and by Codefresh for Runtime components. + +**Sync errors** +Runtimes with sync errors display an **Out of sync** status in Sync Status column. 
They are related to discrepancies between the desired and actual state of a Runtime component or one of the Git Sources associated with the Runtime. + +**View errors** +For both views, select the Runtime, and then select **Errors Detected**. +Here is an example of health errors for a Runtime. + + {% include image.html + lightbox="true" + file="/images/runtime/runtime-health-sync-errors.png" + url="/images/runtime/runtime-health-sync-errors.png" + alt="Health errors for runtime example" + caption="Health errors for runtime example" + max-width="30%" + %} + + +## Related articles +[Add Git Sources to GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/git-sources/) +[Add external clusters to GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/managed-cluster/) +[Shared configuration repo for GitOps Runtimes]({{site.baseurl}}/docs/reference/shared-configuration) + + diff --git a/_docs/installation/installation-options.md b/_docs/installation/installation-options.md new file mode 100644 index 000000000..908c507ad --- /dev/null +++ b/_docs/installation/installation-options.md @@ -0,0 +1,221 @@ +--- +title: "Installation environments" +description: "Understand Runner and GitOps installation options" +group: installation +toc: true +--- +To be changed and updated for ProjectOne + +The Codefresh platform supports three different installation options, all compliant with SOC 2. + +* Hybrid Runner + The Runner installation is the hybrid installation mode for Codefresh pipelines. The Codefresh UI runs in the Codefresh cloud, and the builds run on customer premises. + The Runner combines flexibility with security, and is ideal for Enterprise customers looking for a "behind-the-firewall" solution. For a detailed look, read [Runner installation behind firewalls]({{site.baseurl}}/docs/reference/behind-the-firewall). + Pipelines created in Codefresh fetch code from your Git repository, package/compile the code, and deploy the final artifact to a target environment. + +* On-premises + On-premises installation is for customers who want full control over their environments. Both the UI and builds run on a Kubernetes cluster in an environment fully managed by you as the customer. + + While Codefresh can still help with maintenance of the on-premises platform, we recommend the Hybrid Runner as it combines both flexibility and high security. + + + + +* GitOps + GitOps installation is a full-featured solution for application deployments and releases. Powered by the Argo Project, Codefresh uses Argo CD, Argo Workflows, Argo Events, and Argo Rollouts, extended with unique functionality and features essential for enterprise deployments. + + GitOps installations support Hosted and Hybrid options. + + + + + +## Hybrid Runner + +The Hybrid Runner installation is for organizations that want their source code to live within their premises, or have other security constraints. For more about the theory and implementation, see [Runner installation behind firewalls]({{site.baseurl}}/docs/reference/behind-the-firewall). +The UI runs on Codefresh infrastructure, while the builds happen in a Kubernetes cluster in the customer's premises. + +{% include image.html + lightbox="true" + file="/images/installation/hybrid-installation.png" + url="/images/installation/hybrid-installation.png" + alt="Hybrid Runner installation" + max-width="70%" + %} + + +Hybrid Runner installation strikes the perfect balance between security, flexibility, and ease of use. 
Codefresh still does the heavy lifting of maintaining most parts of the platform. Sensitive data such as source code and internal services never leave customer premises. +Codefresh can easily connect to internal [secure services]({{site.baseurl}}/docs/reference/behind-the-firewall/#using-secure-services-in-your-pipelines) that have no public presence. +The UI is still compliant with SOC 2. + + +The table lists the security implications of Hybrid Runner installation. + +{: .table .table-bordered .table-hover} +| Company Asset | Flow/Storage of data | Comments | +| -------------- | ---------------------------- |-------------------------| +| Source code | Stays behind the firewall | | +| Binary artifacts | Stay behind the firewall | | +| Build logs | Also sent to Codefresh Web application | | +| Pipeline volumes | Stay behind the firewall | | +| Pipeline variables | Defined in Codefresh Web application | | +| Deployment docker images | Stay behind the firewall| Stored on your Docker registry | +| Development docker images | Stay behind the firewall | Stored on your Docker registry| +| Testing docker images | Stay behind the firewall| Stored on your Docker registry | +| Inline pipeline definition | Defined in Codefresh Web application | | +| Pipelines as YAML file | Stay behind the firewall | | +| Test results | Stay behind the firewall | | +| HTML Test reports | Shown on Web application | Stored in your S3 or Google bucket or Azure storage | +| Production database data | Stays behind the firewall | | +| Test database data | Stays behind the firewall | | +| Other services (e.g. Queue, ESB) | Stay behind the firewall | | +| Kubernetes deployment specs | Stay behind the firewall | | +| Helm charts | Stay behind the firewall | | +| Other deployment resources/script (e.g. terraform) | Stay behind the firewall | | +| Shared configuration variables | Defined in Codefresh Web application | | +| Deployment secrets (from git/Puppet/Vault etc) | Stay behind the firewall| | +| Audit logs | Managed via Codefresh Web application | | +| SSO/Idp Configuration | Managed via Codefresh Web application | | +| User emails | Managed via Codefresh Web application | | +| Access control rules | Managed via Codefresh Web application | | + + + +## Codefresh On-premises + +For customers who want full control, Codefresh also offers on-premises installation. Both the UI and builds run on a Kubernetes cluster fully managed by the customer. + +See [Codefresh On-Prem Installation & Configuration]({{site.baseurl}}/docs/installation/codefresh-on-prem). + + +## Codefresh GitOps installation + +Codefresh GitOps also supports SaaS and hybrid installation options: + + +### Hosted GitOps +Hosted GitOps, the SaaS version of GitOps, has Argo CD installed in the Codefresh cluster. +The Hosted GitOps Runtime is installed and provisioned in a Codefresh cluster, and managed by Codefresh. +Hosted environments are full-cloud environments, where all updates and improvements are managed by Codefresh, with zero-maintenance overhead for you as the customer. +Currently, you can add one Hosted GitOps Runtime per account. +For the architecture, see [Hosted GitOps Runtime architecture]({{site.baseurl}}/docs/installation/architecture/#hosted-gitops-runtime-architecture). 
+ + +{% include + image.html + lightbox="true" + file="/images/runtime/intro-hosted-hosted-initial-view.png" + url="/images/runtime/intro-hosted-hosted-initial-view.png" + alt="Hosted runtime setup" + caption="Hosted runtime setup" + max-width="80%" +%} + + For more information on how to set up the hosted environment, including provisioning hosted runtimes, see [Set up Hosted GitOps]({{site.baseurl}}/docs/installation/gitops/hosted-runtime/). + +### Hybrid GitOps +The hybrid version of GitOps has Argo CD installed in the customer's cluster. +Hybrid GitOps is installed in the customer's cluster and managed by the customer. +The Hybrid GitOps Runtime is optimal for organizations with security constraints that want to manage CI/CD operations within their own premises. Hybrid GitOps strikes the perfect balance between security, flexibility, and ease of use. Codefresh maintains and manages most aspects of the platform, apart from installing and upgrading Hybrid GitOps Runtimes, which are managed by the customer. + + +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view.png" + url="/images/runtime/runtime-list-view.png" + alt="Runtime List View" + caption="Runtime List View" + max-width="70%" +%} + + For more information on Hybrid GitOps, see [Hybrid GitOps runtime requirements]({{site.baseurl}}/docs/installation/gitops/hybrid-gitops/#minimum-system-requirements) and [Installing Hybrid GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/hybrid-gitops/). + + + + + +### Hosted vs. Hybrid GitOps + +The table below highlights the main differences between Hosted and Hybrid GitOps. + +{: .table .table-bordered .table-hover} +| GitOps Functionality |Feature | Hosted | Hybrid | +| -------------- | -------------- |--------------- | --------------- | +| Runtime | Installation | Provisioned by Codefresh | Provisioned by customer | +| | Runtime cluster | Managed by Codefresh | Managed by customer | +| | Number per account | One runtime | Multiple runtimes | +| | External cluster | Managed by customer | Managed by customer | +| | Upgrade | Managed by Codefresh | Managed by customer | +| | Uninstall | Managed by customer | Managed by customer | +| Argo CD | | Codefresh cluster | Customer cluster | +| CI Ops | Delivery Pipelines |Not supported | Supported | +| |Workflows | Not supported | Supported | +| |Workflow Templates | Not supported | Supported | +| CD Ops |Applications | Supported | Supported | +| |Image enrichment | Supported | Supported | +| | Rollouts | Supported | Supported | +|Integrations | | Supported | Supported | +|Dashboards |Home Analytics | Hosted runtime and deployments|Runtimes, deployments, Delivery Pipelines | +| |DORA metrics | Supported |Supported | +| |Applications | Supported |Supported | + + +## Installation options comparison +Codefresh Runner and GitOps environments can co-exist, giving you the best of both worlds. 
+ +{: .table .table-bordered .table-hover} +| Characteristic | Hybrid Runner | On Premise | GitOps +| -------------- | ---------------------------- |-------------------------| ----------------| +| Managed by | Codefresh and customer | Customer | Codefresh and customer | +| UI runs on | Public cloud | Private cluster | Public cloud| +| Builds run on | Private cluster | Private cluster | Private cluster (Hybrid)/Codefresh cluster (Hosted)| +| Access to secure/private services | Yes | Yes | Yes | +| Customer maintenance effort | Some | Full | Some | +| Best for | Companies with security constraints | Large scale installations | Companies with security constraints | +| Available to |[Enterprise plans](https://codefresh.io/contact-us/){:target="\_blank"} | [Enterprise plans](https://codefresh.io/contact-us/) |[Enterprise plans](https://codefresh.io/contact-us/) | + + +## Related articles +[Architecture]({{site.baseurl}}/docs/installation/runtime-architecture/) +[Add Git Sources to GitOps Runtimes]({{site.baseurl}}/docs/installation/gitops/git-sources/) +[Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration) + diff --git a/_docs/installation/runtime-architecture.md b/_docs/installation/runtime-architecture.md new file mode 100644 index 000000000..f6ec3650a --- /dev/null +++ b/_docs/installation/runtime-architecture.md @@ -0,0 +1,244 @@ +--- +title: "Runtime architecture" +description: "" +group: installation +toc: true +--- + +If you have familiarized yourself with the different installation, here's a deep dive into the architecture and components of Codefresh Runner and GitOps runtime architectures. + +## Runner architecture + +The most important components are the following: + +**Codefresh VPC:** All internal Codefresh services run in the VPC (analyzed in the next section). Codefresh uses Mongo and PostgreSQL to store user and authentication information. + +**Pipeline execution environment**: The Codefresh engine component is responsible for taking pipeline definitions and running them in managed Kubernetes clusters by automatically launching the Docker containers that each pipeline needs for its steps. + +**External actors**. Codefresh offers a [public API]({{site.baseurl}}/docs/integrations/ci-integrations/codefresh-api/) that is consumed both by the Web user interface and the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"}. The API is also available for any custom integration with external tools or services. + +### Runner topology + +If we zoom into Hybrid Runner services, we will see the following: + +{% include image.html + lightbox="true" + file="/images/installation/topology-new.png" + url="/images/installation/topology-new.png" + alt="Topology diagram" + caption="Topology diagram" + max-width="100%" + %} + +### Runner core components + +{: .table .table-bordered .table-hover} +|Category | Component | Function | +| -------------- | ----------| ----------| +| Core | **pipeline-manager**| Manages all CRUD operations for CI pipelines.| +| | **cfsign** | Signs server TLS certificates for docker daemons, and generates client TLS certificates for hybrid pipelines. | +| | **cf-api** | Central back-end component that functions as an API gateway for other services, and handles authentication/authorization. | +| | **context-manager**| Manages the authentications/configurations used by Codefresh CI/CD and by the Codefresh engine. | +| | **runtime-environment-manager**| Manages the different runtime environments for CI pipelines. 
The runtime environment for CI/CD SaaS is fully managed by Codefresh. For CI/CD Hybrid, customers can add their own runtime environments using private Kubernetes clusters. | +| Trigger | **hermes**| Controls CI pipeline trigger management. See [triggers]({{site.baseurl}}/docs/pipelines/triggers/). | +| | **nomios**| Enables triggers from Docker Hub when a new image/tag is pushed.See [Triggers from Docker Hub]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/). | +| | **cronus**| Enables defining Cron triggers for CI pipelines. See [Cron triggers]({{site.baseurl}}/docs/pipelines/triggers/cron-triggers/).| +| Log | **cf-broadcaster**| Stores build logs from CI pipelines. The UI and CLI stream logs by accessing the **cf-broadcaster** through a web socket. | +| Kubernetes | **cluster-providers** | Provides an interface to define cluster contexts to connect Kubernetes clusters in CI/CD installation environments. | +| | **helm-repo-manager** | Manages the Helm charts for CI/CD installation environments through the Helm repository admin API and ChartMuseum proxy. See [Helm charts in Codefresh]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/). | +| | **k8s-monitor** | The agent installed on every Kubernetes cluster, providing information for the Kubernetes dashboards. See [Kubernetes dashboards]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/). | +| |**charts-manager** | Models the Helm chart view in Codefresh. See [Helm chart view]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/). | +| | **kube-integration** | Provides an interface to retrieve required information from a Kubernetes cluster, can be run either as an http server or an NPM module. | +| | **tasker-kubernetes** | Provides cache storage for Kubernetes dashboards. See [Kubernetes dashboards]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/). | + + +## GitOps architecture + +The diagram shows a high-level view of the GitOps environment, and its core components, the Codefresh Control Plane, the Codefresh Runtime, and the Codefresh Clients. + +{% include +image.html +lightbox="true" +file="/images/getting-started/architecture/arch-codefresh-simple.png" +url="/images/getting-started/architecture/arch-codefresh-simple.png" +alt="Codefresh GitOps Platform architecture" +caption="Codefresh GitOps Platform architecture" +max-width="100%" +%} + +{::nomarkdown} +
            +{:/} + +### GitOps Control Plane +The Codefresh Control Plane is the SaaS component in the platform. External to the enterprise firewall, it does not have direct communication with the Codefresh Runtime, Codefresh Clients, or the customer's organizational systems. The Codefresh Runtime and the Codefresh Clients communicate with the Codefresh Control Plane to retrieve the required information. + + +{::nomarkdown} +
            +{:/} + +### GitOps Runtime +The GitOps Runtime is installed on a Kubernetes cluster, and houses the enterprise distribution of the Codefresh Application Proxy and the Argo Project. +Depending on the type of GitOps installation, the GitOps Runtime is installed either in the Codefresh platform (Hosted GitOps), or in the customer environment (Hybrid GitOps). Read more in [Codefresh GitOps Runtime architecture](#codefresh-gitops-runtime-architecture). + + +{::nomarkdown} +
            +{:/} + +### GitOps Clients + +GitOps Clients include the UI and the GitOps CLI. +The UI provides a unified, enterprise-wide view of deployments (runtimes and clusters), and CI/CD operations (Delivery Pipelines, workflows, and deployments) in the same location. +The Codefresh CLI includes commands to install hybrid runtimes, add external clusters, and manage runtimes and clusters. + +### GitOps Runtime architecture +The sections that follow show detailed views of the GitOps Runtime architecture for the different installation options, and descriptions of the GitOps Runtime components. + +* [Hosted GitOps runtime architecture](#hosted-gitops-runtime-architecture) + For Hosted GitOps, the GitOps Runtime is installed on a _Codefresh-managed cluster_ in the Codefresh platform. +* Hybrid GitOps runtime architecture: + For Hybrid GitOps, the GitOps Runtime is installed on a _customer-managed cluster_ in the customer environment. The Hybrid GitOps Runtime can be tunnel- or ingress-based: + * [Tunnel-based](#tunnel-based-hybrid-gitops-runtime-architecture) + * [Ingress-based](#ingress-based-hybrid-gitops-runtime-architecture) +* GitOps Runtime components + * [Application Proxy](#application-proxy) + * [Argo Project](#argo-project) + * [Request Routing Service](#request-routing-service) + * [Tunnel Server](#tunnel-server) + * [Tunnel Client](#tunnel-client) + + +#### Hosted GitOps runtime architecture +In the hosted environment, the Codefresh Runtime is installed on a K8s cluster managed by Codefresh. + +{% include + image.html + lightbox="true" + file="/images/getting-started/architecture/arch-hosted.png" + url="/images/getting-started/architecture/arch-hosted.png" + alt="Hosted runtime architecture" + caption="Hosted runtime architecture" + max-width="100%" +%} + +#### Tunnel-based Hybrid GitOps runtime architecture +Tunnel-based Hybrid GitOps runtimes use tunneling instead of ingress controllers to control communication between the GitOps Runtime in the customer cluster and the Codefresh GitOps Platform. Tunnel-based runtimes are optimal when the cluster with the GitOps Runtime is not exposed to the internet. + +{% include + image.html + lightbox="true" + file="/images/getting-started/architecture/arch-hybrid-ingressless.png" + url="/images/getting-started/architecture/arch-hybrid-ingressless.png" + alt="Tunnel-based hybrid runtime architecture" + caption="Tunnel-based hybrid runtime architecture" + max-width="100%" +%} + + +#### Ingress-based Hybrid GitOps runtime architecture +Ingress-based runtimes use ingress controllers to control communication between the GitOps Runtime in the customer cluster and the Codefresh GitOps Platform. Ingress-based runtimes are optimal when the cluster with the GitOps Runtime is exposed to the internet. + + + +{% include + image.html + lightbox="true" + file="/images/getting-started/architecture/arch-hybrid-ingress.png" + url="/images/getting-started/architecture/arch-hybrid-ingress.png" + alt="Ingress-based hybrid runtime architecture" + caption="Ingress-based hybrid runtime architecture" + max-width="100%" +%} + + +#### Application Proxy +The GitOps Application Proxy (App-Proxy) functions as the Codefresh agent, and is deployed as a service in the GitOps Runtime. + +For tunnel-based Hybrid GitOps Runtimes, the Tunnel Client forwards the incoming traffic from the Tunnel Server using the Request Routing Service to the GitOps App-Proxy. 
+For Hybrid GitOps Runtimes with ingress, the App-Proxy is the single point-of-contact between the GitOps Runtime, and the GitOps Clients, the GitOps Platform, and any organizational systems in the customer environment. + + +The GitOps App-Proxy: +* Accepts and serves requests from GitOps Clients either via the UI or CLI +* Retrieves a list of Git repositories for visualization in the Client interfaces +* Retrieves permissions from the GitOps Control Plane to authenticate and authorize users for the required operations. +* Implements commits for GitOps-controlled entities, such as Delivery Pipelines and other CI resources +* Implements state-change operations for non-GitOps controlled entities, such as terminating Argo Workflows + +{::nomarkdown} +
            +{:/} + +#### Argo Project + +The Argo Project includes: +* Argo CD for declarative continuous deployment +* Argo Rollouts for progressive delivery +* Argo Workflows as the workflow engine +* Argo Events for event-driven workflow automation framework + +>Codefresh users rely on our platform to deliver software reliably, and predictably without interruption. + To maintain that high standard, we add several weeks of testing and bug fixes to new versions of Argo before making them available within Codefresh. + Typically, new versions of Argo are available within 30 days of release in Argo. + +{::nomarkdown} +

            +{:/} + +#### Request Routing Service +The Request Routing Service is installed on the same cluster as the GitOps Runtime in the customer environment. +It receives requests from the the Tunnel Client (tunnel-based) or the ingress controller (ingress-based), and forwards the request URLs to the Application Proxy, and webhooks directly to the Event Sources. + +>Important: + The Request Routing Service is available from runtime version 0.0.543 and higher. + Older runtime versions are not affected as there is complete backward compatibility, and the ingress controller continues to route incoming requests. + +#### Tunnel Server +Applies only to _tunnel-based_ Hybrid GitOps Runtimes. +The Codefresh Tunnel Server is installed in the Codefresh platform. It communicates with the enterprise cluster located behind a NAT or firewall. + +The Tunnel Server: +* Forwards traffic from Codefresh Clients to the client (customer) cluster. +* Manages the lifecycle of the Tunnel Client. +* Authenticates requests from the Tunnel Client to open tunneling connections. + +{::nomarkdown} +
            +{:/} + +#### Tunnel Client +Applies only to _tunnel-based_ Hybrid GitOps Runtimes. + +Installed on the same cluster as the Hybrid GitOps Runtime, the Tunnel Client establishes the tunneling connection to the Tunnel Server via the WebSocket Secure (WSS) protocol. +A single Hybrid GitOps Runtime can have a single Tunnel Client. + +The Tunnel Client: +* Initiates the connection with the Tunnel Server. +* Forwards the incoming traffic from the Tunnel Server through the Request Routing Service to App-Proxy, and other services. + +{::nomarkdown} +
            +{:/} + + +#### Customer environment +The customer environment that communicates with the GitOps Runtime and Codefresh, generally includes: +* Ingress controller for ingress-based Hybrid runtimes + The ingress controller is configured on the same Kubernetes cluster as the GitOps Runtime, and implements the ingress traffic rules for the GitOps Runtime. + See [Ingress controller requirements]({{site.baseurl}}/docs/installation/gitops/monitor-manage-runtimes/#ingress-controller). +* Managed clusters + Managed clusters are external clusters registered to provisioned Hosted or Hybrid GitOps runtimes for application deployment. + Hosted GitOps requires you to connect at least one external K8s cluster as part of setting up the Hosted GitOps environment. + Hybrid GitOps allow you to add external clusters after provisioning the runtimes. + See [Add external clusters to runtimes]({{site.baseurl}}/docs/installation/gitops/managed-cluster/). +* Organizational systems + Organizational Systems include the customer's tracking, monitoring, notification, container registries, Git providers, and other systems. They can be entirely on-premises or in the public cloud. + Either the ingress controller (ingress hybrid environments), or the Tunnel Client (tunnel-based hybrid environments), forwards incoming events to the GitOps Application Proxy. + + ## Related articles +[Codefresh pricing](https://codefresh.io/pricing/){:target="\_blank"} +[Codefresh features](https://codefresh.io/features/){:target="\_blank"} + + \ No newline at end of file diff --git a/_docs/integrations/amazon-web-services.md b/_docs/integrations/amazon-web-services.md new file mode 100644 index 000000000..28494a1f2 --- /dev/null +++ b/_docs/integrations/amazon-web-services.md @@ -0,0 +1,93 @@ +--- +title: "Amazon Web Services (AWS) pipeline integration" +description: "How to use Codefresh with AWS" +group: integrations +toc: true +--- + +Codefresh has native support for AWS in the following areas: + +- [Connecting to Amazon registries]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) +- [Deploying to Amazon EKS]({{site.baseurl}}/docs/integrations/kubernetes/#adding-eks-cluster) +- [Using Amazon S3 for Test reports]({{site.baseurl}}/docs/testing/test-reports/#connecting-an-s3-bucket) +- [Using Amazon S3 for Helm charts]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/#private-repository---s3) + + +## Using Amazon ECR + +Amazon Container Registries are fully compliant with the Docker registry API that Codefresh follows. Follow the instruction under [Amazon EC2 Container Registry]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) to connect. + +Once the registry is added, you can use the [standard push step]({{site.baseurl}}/docs/pipelines/steps/push/) in your pipelines. See [working with Docker registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) for more information. + +## Deploying to Amazon Kubernetes + +Codefresh has native support for connecting an EKS cluster in the [cluster configuration screen]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster). 
+ +{% + include image.html + lightbox="true" +file="/images/integrations/aws/aws-integration.png" +url="/images/integrations/aws/aws-integration.png" +alt="Connecting an Amazon cluster" +caption="Connecting a Amazon cluster" +max-width="40%" +%} + +Once the cluster is connected, you can use any of the [available deployment options]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) for Kubernetes clusters. You also get access to all other Kubernetes dashboards such as the [cluster dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) and the [environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/). + +## Storing test reports in Amazon S3 bucket + +Codefresh has native support for test reports. You can store the reports on Amazon S3. + +{% include +image.html +lightbox="true" +file="/images/integrations/aws/amazon-storage.png" +url="/images/integrations/aws/amazon-storage.png" +alt="Amazon cloud storage" +caption="Amazon cloud storage" +max-width="60%" +%} + +See the full documentation for [test reports]({{site.baseurl}}/docs/testing/test-reports/). + +## Using Amazon S3 for storing Helm charts + +You can connect an Amazon S3 bucket as a Helm repository in the [integrations screen]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/). + +{% include +image.html +lightbox="true" +file="/images/integrations/aws/amazon-s3-helm-repo.png" +url="/images/integrations/aws/amazon-s3-helm-repo.png" +alt="Using Amazon for Helm charts" +caption="Using Amazon for Helm charts" +max-width="80%" +%} + +Once you connect your Helm repository you can use it any [Codefresh pipeline with the Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + + +## Traditional Amazon deployments + +For any other Amazon deployment you can use the [Amazon CLI from a Docker image](https://hub.docker.com/r/amazon/aws-cli){:target="\_blank"} in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + +`YAML` +{% highlight yaml %} +{% raw %} + create_a_vm: + title: "Creating a Virtual machine" + type: "freestyle" + arguments: + image: "amazon/aws-cli" + commands: + - aws ec2 run-instances --image-id ami-xxxxxxxx --count 1 --instance-type t2.micro --key-name MyKeyPair --security-group-ids sg-903004f8 --subnet-id subnet-6e7f829e +{% endraw %} +{% endhighlight %} + + +## Related articles +[Add your cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) +[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Cloning Git repositories]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) + diff --git a/_docs/integrations/argocd.md b/_docs/integrations/argocd.md new file mode 100644 index 000000000..2889c926e --- /dev/null +++ b/_docs/integrations/argocd.md @@ -0,0 +1,177 @@ +--- +title: "ArgoCD integration for CI pipelines" +description: "Connect Codefresh to your ArgoCD endpoint" +group: integrations +toc: true +--- + + +Before you can use Codefresh and ArgoCD together, you need to connect your ArgoCD installation in your Codefresh account. This way Codefresh will send and receive information from your ArgoCD instance. 
+ +{% include image.html + lightbox="true" + file="/images/guides/gitops/gitops-environment.png" + url="/images/guides/gitops/gitops-environment.png" + alt="GitOps deployments with Codefresh" + caption="GitOps deployments with Codefresh" + max-width="100%" + %} + +>Important: + Codefresh has a --> + +## Set up ArgoCD integration in Codefresh + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **GitOps** and then click **Configure**. +1. From the **Add GitOps Provider** dropdown, select **ArgoCD**. +1. Follow the on-screen instructions to complete the integration. + +### Codefresh CLI +To connect to an existing ArgoCD installation: +1. Install the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} by following the [documentation](https://codefresh-io.github.io/cli/installation/){:target="\_blank"}. + The Codefresh CLI installs an agent in your cluster, in the same namespace that ArgoCD runs in. The agent handles all communication between ArgoCD and Codefresh. +1. Authenticate the CLI with your Codefresh account [by creating an API token]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions). Make sure that you choose all scopes if this is the first time you are authenticating your CLI with Codefresh. +1. From a workstation that has a `kubeconfig` context pointing to the ArgoCD cluster, run the installation command: + +``` +codefresh install gitops argocd-agent +``` + +>You can also run it from your cloud console if you install codefresh CLI there. + +1. Answer the questions asked by the wizard. These include: + + * The name of the integration (user-defined) + * Your ArgoCD URL, username and password (you can also use [an auth token](https://argoproj.github.io/argo-cd/operator-manual/user-management/){:target="\_blank"} instead of password) + * The context and namespace in the cluster where ArgoCD is installed + * If you want to automatically import your ArgoCD applications to Codefresh + +``` +codefresh install gitops argocd-agent +This installer will guide you through the Codefresh ArgoCD installation agent to integrate your ArgoCD with Codefresh +? Select Kubernetes context mydemoAkscluster +? Codefresh integration name argocd +? Choose an authentication method Username and password +? Argo username admin +? Argo password ***************************** + +Testing requirements +-------------------- +√ checking argocd credentials... +√ checking argocd projects accessibility... +√ checking argocd applications accessibility... +-------------------- + +? Select Git/GithubApp context (Please create a dedicated context for the agent to avoid hitting the Github rate limits or use github app integration) github-1 +? Select argocd sync behavior please Import all existing Argo applications to Codefresh +? Enable auto-sync of applications, this will import all existing applications and update Codefresh in the future Yes + +Installation options summary: + 1. Kubernetes Context: + 2. Kubernetes Namespace: argocd + 3. Git Integration: github-1 + 4. Codefresh Host: https://g.codefresh.io + 5. ArgoCD Host: https://52.154.209.119 + 6. ArgoCD Username: admin + 7. ArgoCD Password: ****** + 8. Enable auto-sync of applications: Yes + 9. HTTP proxy: none + 10. 
HTTPS proxy: none + +Argo agent installation finished successfully to namespace "argocd" +Gitops view: "https://g.codefresh.io/gitops" +Documentation: "https://codefresh.io/docs/docs/ci-cd-guides/gitops-deployments/" +``` + +Once the installation is complete, you should see the agent's health status: + +{% include image.html + lightbox="true" + file="/images/integrations/argocd/argocd-agent-health.png" + url="/images/integrations/argocd/argocd-agent-health.png" + alt="ArgoCD agent health" + caption="ArgoCD agent health" + max-width="100%" + %} + + +This concludes the basic integration. You can repeat the procedure for different ArgoCD installations by choosing a different +name for the integration. + +## Creating ArgoCD applications + +In addition to the existing [Kubernetes/Helm environments]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/), you can now create ArgoCD applications via the Codefresh UI. + +Visit your GitOps dashboard by clicking on *GitOps* from the left sidebar. Then click the *Add Application* button at the top right. + +If you already have an application set up in ArgoCD, you can enter its project and name, and Codefresh will automatically retrieve all information from the ArgoCD instance. + +{% include image.html + lightbox="true" + file="/images/integrations/argocd/argocd-existing-app.png" + url="/images/integrations/argocd/argocd-existing-app.png" + alt="Using an existing ArgoCD application in a Codefresh environment" + caption="Using an existing ArgoCD application in a Codefresh environment" + max-width="60%" + %} + +You can also create a brand-new application with the *provision* option. In this dialog you can enter the exact same details that [ArgoCD asks when creating a new application](https://argoproj.github.io/argo-cd/getting_started/#6-create-an-application-from-a-git-repository). + +{% include image.html + lightbox="true" + file="/images/integrations/argocd/argocd-provision-app.png" + url="/images/integrations/argocd/argocd-provision-app.png" + alt="Creating a new ArgoCD application in a Codefresh environment" + caption="Creating a new ArgoCD application in a Codefresh environment" + max-width="60%" + %} + +The options are: + +* Name - User-defined name of the Codefresh environment dashboard +* Project - A way to [group/secure applications](https://argoproj.github.io/argo-cd/user-guide/projects/). Choose default if you have only one project in ArgoCD. +* Application - Name of the application +* Manual/automatic sync - If automatic, a deployment automatically takes place whenever a Git commit happens. +* Use schema - Kubernetes manifests will be checked for correctness before being deployed to the cluster +* source repository - Git repository that holds your Kubernetes manifests +* revision - Revision to be checked out when a deployment happens +* path - Folder inside the Git repository that should be searched for manifests (if your Git repo has multiple applications). Use `./` if all your manifests are in the root folder. +* cluster - Kubernetes cluster where the deployment will take place +* namespace - Kubernetes namespace where the application will be deployed +* directory recurse - Whether to check all folders in the Git repository for manifests recursively. + +For a sample application you can use the [https://github.com/codefresh-contrib/gitops-kubernetes-configuration](https://github.com/codefresh-contrib/gitops-kubernetes-configuration) repository (or even fork it on your own GitHub account first). 
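+These options correspond to the fields of a standard Argo CD `Application` manifest. The sketch below is for illustration only: the application name, repository URL, and namespaces are placeholder values, not values generated by Codefresh.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: my-sample-app                       # "Name"/"Application" options
+  namespace: argocd
+spec:
+  project: default                          # "Project" option
+  source:
+    repoURL: https://github.com/codefresh-contrib/gitops-kubernetes-configuration  # "source repository"
+    targetRevision: HEAD                    # "revision"
+    path: .                                 # "path"; "." means the root folder of the repo
+    directory:
+      recurse: true                         # "directory recurse"
+  destination:
+    server: https://kubernetes.default.svc  # "cluster"
+    namespace: my-app                       # "namespace"
+  syncPolicy:
+    automated: {}                           # "Manual/automatic sync"; omit for manual sync
+```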
+ +Codefresh will communicate with ArgoCD via its API and pass all the relevant details. + +The end result is a new entry for your ArgoCD application will now appear in the dashboard along with the sync status. + +{% include image.html + lightbox="true" + file="/images/integrations/argocd/argocd-environment.png" + url="/images/integrations/argocd/argocd-environment.png" + alt="ArgoCD environment status" + caption="ArgoCD environment status" + max-width="80%" + %} + +To learn about the full GitOps support in Codefresh, see our [GitOps deployment guide]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/). + +## Uninstall the gitops agent + +You can uninstall the gitops agent with : + +``` +codefresh uninstall gitops argocd-agent +``` + +Note this will only uninstall the Codefresh agent. Your Argo CD installation +will remain unaffected. + +## Related articles +[Environment Dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +[Kubernetes integration]({{site.baseurl}}/docs/integrations/kubernetes/) + + + diff --git a/_docs/integrations/codecov-integration.md b/_docs/integrations/codecov-integration.md new file mode 100644 index 000000000..4f7b2f303 --- /dev/null +++ b/_docs/integrations/codecov-integration.md @@ -0,0 +1,71 @@ +--- +title: "Codecov integration for CI pipelines" +description: "Create Code Coverage Reports with Codefresh and Codecov" +group: integrations +toc: true +--- + +Codefresh has native integration for [Codecov analysis](https://about.codecov.io/){:target="\_blank"}. +You need to first set up a new project in Codecov. + +## Set up a new project in Codecov + +* Sign up for a free account with Codecov. +* Add a new project. + +{% include image.html +lightbox="true" +file="/images/integrations/codecov-integration/codecovtoken.png" +url="/images/integrations/codecov-integration/codecovtoken.png" +max-width="70%" +caption="Getting a Token from Codecov" +alt="Getting a Token from Codecov" +%} + +* Note down the Token as you will need it to set up the Codecov integration in Codefresh. + +## Set up Codecov integration in Codefresh + + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Codecov** and then click **Configure**. +1. Click **Add Codecov**. +1. Define the following: + * **Integration Name**: Enter a name for the integration which is used to reference it in `codefresh.yaml`. + * **Token**: Paste the token that you copied when you created the new Codecov project for this integration. + * **Url**: The base URL for this integration. Do not add the trailing slash to the URL definition. For more information, see the [official Codecov documentation](https://docs.codecov.com/docs/configuration#codecov-url){:target="\_blank"}. + + +{% include image.html +lightbox="true" +file="/images/integrations/codecov-integration/codecovintegration.png" +url="/images/integrations/codecov-integration/codecovintegration.png" +max-width="70%" +caption="Enter Token" +alt="Enter Token" +%} + + + +## Using Codecov in a CI pipeline + +With the integration in place, you can reference it by name in any Codefresh pipeline by using the [Codecov reporter step](https://codefresh.io/steps/step/codecov-reporter){:target="\_blank"}. 
+ +`codefresh.yml` +```yaml + codecov-report: + stage: "prepare" + title: Codecov report + type: codecov-reporter + arguments: + codecov_integration: my-codecov-integration +``` + +For more details see our [Codecov example](https://codefresh.io/docs/docs/example-catalog/ci-examples/codecov-testing/). + +## Related articles +[Integration Tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/) +[Coveralls Example]({{site.baseurl}}/docs/example-catalog/ci-examples/coveralls-testing/) +[Codacy Example]({{site.baseurl}}/docs/example-catalog/ci-examples/codacy-testing/) +[Test Reports]({{site.baseurl}}/docs/testing/test-reports/) \ No newline at end of file diff --git a/_docs/integrations/codefresh-api.md b/_docs/integrations/codefresh-api.md new file mode 100644 index 000000000..1ec768888 --- /dev/null +++ b/_docs/integrations/codefresh-api.md @@ -0,0 +1,527 @@ +--- +title: "Codefresh API pipeline integration" +description: "Integrate Codefresh CI pipelines with other systems" +group: integrations +redirect_from: + - /docs/codefresh-api/ +toc: true +old_url: /docs/codefresh-api +--- + +Codefresh offers a comprehensive API that you can use to integrate with any other application or solution you already have. + +The full details of the API are documented at [https://g.codefresh.io/api/](https://g.codefresh.io/api/){:target="\_blank"}. + +{% include image.html +lightbox="true" +file="/images/integrations/api/overview.png" +url="/images/integrations/api/overview.png" +alt="Using the Codefresh API" +max-width="70%" +%} + +You can use the API in various ways: + +* From your local workstation, with any tool that speaks HTTP (such as [postman](https://github.com/postmanlabs){:target="\_blank"}, [httpie](https://httpie.org/){:target="\_blank"}, [curl](https://curl.haxx.se/){:target="\_blank"} etc.). +* From another HTTP-enabled tool such as Jenkins. For example, you can trigger [Codefresh pipelines from Jenkins jobs]({{site.baseurl}}/docs/integrations/jenkins-integration/#calling-codefresh-pipelines-from-jenkins-jobs). +* Using the [Codefresh command line interface](https://codefresh-io.github.io/cli/){:target="\_blank"} which itself uses the API. +* Calling it programmatically from any other system. You can use your favorite programming language to make HTTP calls to Codefresh. + + +The Codefresh API is updated when new features are added in the Codefresh platform so you can expect any new functionality +to appear in the API as well. + +## Ways to use the Codefresh API + +There are several ways to use the API. Some of the most popular ones are: + + +1. Triggering builds from another system. You can start a Codefresh pipeline from any other internal system that you already have in your organization. +1. Getting the status of builds in another system. +1. Creating pipelines externally. You don't have to use the Codefresh UI to create pipelines. You can create them programmatically using your favorite template mechanism. You can reuse pipelines using your own custom implementation if you have special needs in your organization. + +You can browse the current API at [https://g.codefresh.io/api/](https://g.codefresh.io/api/){:target="\_blank"}. 
+ +{% include image.html +lightbox="true" +file="/images/integrations/api/codefresh-api-example.png" +url="/images/integrations/api/codefresh-api-example.png" +alt="Browsing the Codefresh API" +caption="Browsing the Codefresh API" +max-width="70%" +%} + +For each call you will also see an example with `curl`. + +## Authentication instructions + + +1. Log in to your Codefresh account, and from your avatar dropdown, select [**User Settings**](https://g.codefresh.io/user/settings){:target="\_blank"}. +1. Scroll down to **API Keys**. +1. To create a new API key, click **Generate**, and do the following: + * **Key Name**: Enter the name of the key, preferable one that will help you remember its purpose. The token is tied to your Codefresh account and should be considered sensitive information. + * **Scopes**: Select the required [access scopes](#access-scopes). +1. Copy the token to your clipboard. +1. Click **Create**. + +{% include image.html +lightbox="true" +file="/images/integrations/api/generate-token.png" +url="/images/integrations/api/generate-token.png" +alt="Generating a key for the API" +caption="Generating a key for the API" +max-width="70%" +%} + + +From the same screen you can also revoke keys if you don't need them anymore. + +### Access scopes + +The following resources can be targeted with the API: + +* *Agent* - Used for [Codefresh Runner installation]({{site.baseurl}}/docs/reference/behind-the-firewall/) +* *Audit* - Read [Audit logs]({{site.baseurl}}/docs/administration/audit-logs/) +* *Build* - Get/change [build status]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/) +* *Cluster* - [Access control]({{site.baseurl}}/docs/administration/access-control/) for [Kubernetes clusters]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +* *Environments-v2* - Read/Write [Environment Dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) information +* *GitHub Actions* - Run [GitHub Actions inside Codefresh pipelines]({{site.baseurl}}/docs/integrations/github-actions/) +* *Pipeline* - [Access control]({{site.baseurl}}/docs/administration/access-control/) for [pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +* *Repos* - Refers to [Git repositories]({{site.baseurl}}/docs/integrations/git-providers/) +* *Step Type* - Refers to [custom pipeline steps]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin) + + +The scopes available for each resource differ according to the type of resource. + + +## Using the API Key with the Codefresh CLI + +Once you have the key, use it in the Codefresh CLI: + +{% highlight bash %} +codefresh auth create-context --api-key +{% endhighlight %} + +Now the Codefresh CLI is fully authenticated. The key is stored in `~/.cfconfig` so you only need to run this command once. The CLI +can also work with [multiple authentication contexts](https://codefresh-io.github.io/cli/authentication/){:target="\_blank"} so you can manage multiple Codefresh accounts at the same time. + +## Example: Triggering pipelines + +You can trigger any pipeline in Codefresh and even pass extra environment variables (even if they are not +declared in the UI). 
+ +Triggering a pipeline via the Codefresh CLI: + +{% highlight bash %} +codefresh run kostis-codefresh/nestjs-example/ci-build -b master -t nestjs-example-trigger-name +{% endhighlight %} + +You can pass extra environment variables as well: +{% highlight bash %} +codefresh run kostis-codefresh/nestjs-example/ci-build -b master -t nestjs-example-trigger-name -v sample-var1=sample1 -v SAMPLE_VAR2=SAMPLE2 +{% endhighlight %} + +For the API, you can trigger a pipeline by finding its `serviceId` from the UI + +{% highlight bash %} +curl 'https://g.codefresh.io/api/builds/5b1a78d1bdbf074c8a9b3458' --compressed -H 'content-type:application/json; charset=utf-8' -H 'Authorization: ' --data-binary '{"serviceId":"5b1a78d1bdbf074c8a9b3458","type":"build","repoOwner":"kostis-codefresh","branch":"master","repoName":"nestjs-example"}' +{% endhighlight %} + +You can also pass extra environment variables using an array + +{% highlight bash %} +curl 'https://g.codefresh.io/api/builds/5b1a78d1bdbf074c8a9b3458' --compressed -H 'content-type:application/json; charset=utf-8' -H 'Authorization: ' --data-binary '{"serviceId":"5b1a78d1bdbf074c8a9b3458","type":"build","repoOwner":"kostis-codefresh","branch":"master","repoName":"nestjs-example","variables":{"sample-var1":"sample1","SAMPLE_VAR2":"SAMPLE2"}}' +{% endhighlight %} + +## Example: Getting status from builds + +You can get the status of a build from the CLI by using the build ID: + +{% highlight bash %} +codefresh get builds 5b4f927dc70d080001536fe3 +{% endhighlight %} + +Same thing with the API: + +{% highlight bash %} +curl -X GET --header "Accept: application/json" --header "Authorization: " "https://g.codefresh.io/api/builds/5b4f927dc70d080001536fe3" +{% endhighlight %} + +## Example: Creating Codefresh pipelines externally + +Codefresh has a great UI for creating pipelines for each of your projects. If you wish, you can also create pipelines +programmatically in an external manner. This allows you to use your own templating solution for re-using pipelines +and creating them from an external system. + +First you need a YAML file that defines the pipeline. This is a pipeline [specification](#full-pipeline-specification). + +>It is also very easy to create a a dummy pipeline in the Codefresh UI and then get its specification by running `codefresh get pipeline my-project/my-pipeline -o yaml > my-pipeline-spec.yml` + +Here is an example + +`Pipeline Spec` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: pipeline +metadata: + name: my-project/my-basic-pipeline + description: my description + labels: + key1: value1 + key2: value2 + deprecate: + applicationPort: '8080' + project: my-project +spec: + triggers: + - type: git + provider: github + name: my-trigger + repo: kostis-codefresh/nestjs-example + events: + - push + branchRegex: /./ + contexts: [] + variables: + - key: PORT + value: 5000 + encrypted: false + - key: SECRET + value: "secret-value" + encrypted: true + steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: github-1 + PrintFileList: + title: Listing files + image: 'alpine:latest' + commands: + - ls -l + stages: [] +{% endraw %} +{% endhighlight %} + +Save this spec into a file with an arbitrary name like `my-pipeline-spec.yml`. 
First create the new project (if it doesn't exist already): + +{% highlight bash %} +codefresh create project my-project +{% endhighlight %} + +Then you can create the pipeline with the cli + +{% highlight bash %} +codefresh create pipeline -f my-pipeline-spec.yml +{% endhighlight %} + +And your pipeline will be available in the GUI + +{% include image.html +lightbox="true" +file="/images/integrations/api/creation-of-pipeline.png" +url="/images/integrations/api/creation-of-pipeline.png" +alt="Created Pipeline" +caption="New pipeline created" +max-width="70%" +%} + +Notice that you must prefix the name of the pipeline with your username and repository so that it becomes +visible in the GUI under the correct project. + +## Full pipeline specification + +If you don't want to create a pipeline from an existing one, you can also create your own YAML from scratch. +The following sections contain an explanation of the fields. +> Codefresh automatically generates additional fields, usually fields with dates and internal ID numbers. While you cannot edit these fields, you can view them by exporting the pipeline. + +### Top level fields + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `version` | | string | Always `'1.0'` | +| `kind` | | string | Always `pipeline` | +| `metadata` | | object | Holds various meta-information | +| `spec` | | object | Holds the pipeline definition and other related information | + + +### Metadata fields + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `name` | `metadata` | string | the full pipeline name should be formatted `project_name/pipeline_name` | +| `project` | `metadata` | string | the project that contains this pipeline | +| `originalYamlString` | `metadata` | string | the full contents of the pipeline editor. 
Only kept for archival purposes | +| `labels` | `metadata` | object | Holds the `tags` array | +| `tags` | `labels` | array | A list of [access control tags]({{site.baseurl}}/docs/administration/access-control/#marking-pipelines-with-policy-attributes) for this pipeline | +| `description` | `metadata` | string | Human readable description of the pipeline | +| `isPublic ` | `metadata` | boolean | If true the pipeline logs [will be public]({{site.baseurl}}/docs/configure-ci-cd-pipeline/build-status/) even for non-authenticated users | +| `template ` | `metadata` | boolean | If true, this pipeline will be listed as a template when creating a new pipeline | + +Example of metadata: + +`Pipeline Spec` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: pipeline +metadata: + name: project_name/pipeline_name + project: project_name + labels: + tags: + - tag1 + - tag2 + description: pipeline description here + isPublic: false + template: + isTemplate: false +{% endraw %} +{% endhighlight %} + +### Spec fields + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `steps` | `spec` | object | The [pipeline steps]({{site.baseurl}}/docs/codefresh-yaml/steps/) to be executed | +| `stages` | `spec` | array | The [pipeline stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for a better visual overview | +| `variables` | `spec` | array | List of variables defined in the pipeline itself | +| `contexts` | `spec` | array | Variable sets imported from [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) | +| `runtimeEnvironment` | `spec` | array | where to execute this pipeline | +| `terminationPolicy ` | `spec` | array | Termination settings of this pipeline | +| `concurrency ` | `spec` | number | How many instances of this pipeline [can run at the same time]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#policies) | +| `triggerConcurrency ` | `spec` | number | How many instances of this pipeline can run at the same time per trigger | +| `branchConcurrency ` | `spec` | number | How many instances of this pipeline can run at the same time per branch | +| `externalResources ` | `spec` | array | Optional external files available to this pipeline | +| `triggers` | `spec` | array | a list of [Git triggers]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) that affect this pipeline | +| `options` | `spec` | object | Extra options for the pipeline | +| `enableNotifications` | `options` | boolean | if false the pipeline will not send notifications to [Slack]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) and status updates back to the Git provider | + +### Pipeline variables + +The `variables` array has entries with the following fields: + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `key` | `variables` | string | Name of the variable | +| `value` | `variables` | string | Raw value | +| `encrypted` | `variables` | boolean | if true the value is stored encrypted | + +Example of variables: + +`Pipeline Spec` +{% highlight yaml %} +{% raw %} + variables: + - key: my-key + value: my-value + encrypted: false + - key: my-second-variable + value: '*****' + encrypted: true +{% endraw %} +{% endhighlight %} + +Encrypted variables cannot be 
read back by exporting the pipeline. + +### Runtime environment + +The `runtimeEnvironment` selects the cluster that will execute the pipeline (mostly useful for organizations using the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/)) + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `name` | `runtimeEnvironment` | string | Name the environment as connected by the runner | +| `cpu` | `runtimeEnvironment` | string | CPU share using Kubernetes notation | +| `memory` | `runtimeEnvironment` | string | memory share using Kubernetes notation | +| `dindStorage` | `runtimeEnvironment` | string | storage size using Kubernetes notation | + + +Example of metadata: + +`Pipeline Spec` +{% highlight yaml %} +{% raw %} +runtimeEnvironment: + name: my-aws-runner/cf + cpu: 2000m + memory: 800Mi + dindStorage: nullGi +{% endraw %} +{% endhighlight %} + + + +### External resources + +The `externalResources` field is an array of objects that hold [external resource information]({{site.baseurl}}/docs/pipelines/pipelines/#external-resources). + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `type` | `externalResources` | string | Only `git` is supported | +| `source` | `externalResources` | string | Source folder or file path in Git repo | +| `context` | `externalResources` | string | Name of Git provider to be used | +| `destination` | `externalResources` | string | Target folder or file path to be copied to | +| `isFolder` | `externalResources` | boolean | if true path is a folder, else it is a single file | +| `repo` | `externalResources` | string | git repository name for the trigger. should be in format of `git_repo_owner/git_repo_name` | +| `revision` | `externalResources` | string | branch name or git hash to checkout | + + +`Pipeline Spec` +{% highlight yaml %} +{% raw %} +externalResources: + - type: git + source: /src/sample/venonalog.json + context: my-github-integration + destination: codefresh/volume/helm-sample-app/ + isFolder: false + repo: codefresh-contrib/helm-sample-app + revision: master +{% endraw %} +{% endhighlight %} + +### Git triggers + +The `triggers` field is an array of objects that hold [Git trigger information]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) with the following fields. + +{: .table .table-bordered .table-hover} +| Field name | Parent field | Type | Value | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| `name` | `triggers` | string | user defined trigger name | +| `type` | `triggers` | string | Always `git` | +| `repo` | `triggers` | string | git repository name for the trigger. should be in format of `git_repo_owner/git_repo_name` | +| `events` | `triggers` | array | All possible values are documented later. The possible values depend on Git provider | +| `pullRequestAllowForkEvents` | `triggers` | boolean | If this trigger is also applicable to Git forks | +| `commentRegex` | `triggers` | string | Only activate trigger if regex expression matches PR comment | +| `branchRegex ` | `triggers` | string | Only activate trigger if regex expression/string matches branch | +| `branchRegexInput ` | `triggers` | string | Defines what type of content is in `branchRegex`. 
Possible values are `regex`, `multiselect`, `multiselect-exclude` | +| `provider ` | `triggers` | string | Name of provider as found in Git integrations | +| `modifiedFilesGlob ` | `triggers` | string | Only activate trigger if changed files match glob expression | +| `disabled ` | `triggers` | boolean | if true, trigger will never be activated | +| `options ` | `triggers` | array | Choosing [caching behavior]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipeline-caching/) of this pipeline | +| `noCache ` | `options` | boolean | if true, docker layer cache is disabled | +| `noCfCache ` | `options` | boolean | if true, extra Codefresh caching is disabled | +| `resetVolume ` | `options` | boolean | if true, all files on volume will be deleted before each execution | +| `context ` | `triggers` | string | Name of git context to use | +| `contexts` | `spec` | array | Variable sets imported from [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) | +| `variables` | `triggers` | array | Override variables that were defined in the pipeline level | +| `runtimeEnvironment` | `triggers` | array | Override the runtime environment that was defined in the pipeline level | + +The possible values for the `events` array are the following (only those supported by the Git provider can be actually used) + + * `push.heads` - for push commit + * `push.tags` - for push tag event + * `pullrequest` - any pull request event + * `pullrequest.opened` - pull request opened + * `pullrequest.closed` - pull request closed + * `pullrequest.merged` - pull request merged + * `pullrequest.unmerged-closed` - pull request closed (not merged) + * `pullrequest.reopened` - pull request reopened + * `pullrequest.edited` - pull request edited + * `pullrequest.assigned` - pull request assigned + * `pullrequest.unassigned` - pull request unassigned + * `pullrequest.reviewRequested` - pull request review requested + * `pullrequest.reviewRequestRemoved` - pull request review request removed + * `pullrequest.labeled` - pull request labeled + * `pullrequest.unlabeled` - pull request unlabeled + * `pullrequest.synchronize` - pull request synchronized + * `pullrequest.commentAdded` - pull request comment added + * `release` - Git release event + +The `variables` and `runtimeEnvironment` fields have exactly the same format as in the parent pipeline fields but values defined in the trigger will take higher priority. + +Full example: + +`Pipeline Spec` +{% highlight yaml %} +{% raw %} +triggers: + - name: guysalton-codefresh/helm-sample-app + type: git + repo: guysalton-codefresh/helm-sample-app + events: + - push.heads + - pullrequest.commentAdded + pullRequestAllowForkEvents: true + commentRegex: /.*/gi + branchRegex: /^((master|develop)$).*/gi + branchRegexInput: regex + modifiedFilesGlob: /project1/** + provider: github + disabled: false + options: + noCache: false + noCfCache: false + resetVolume: false + verified: true + context: guyGithub + contexts: + - artifactory + variables: + - key: key + value: '*****' + encrypted: true + runtimeEnvironment: + name: docker-desktop/cf + cpu: 400m + memory: 800Mi + dindStorage: nullGi +{% endraw %} +{% endhighlight %} + + +## Using Codefresh from within Codefresh + +The Codefresh CLI is also packaged as a [Docker image on its own](https://hub.docker.com/r/codefresh/cli/){:target="\_blank"}. This makes it +very easy to use it from within Codefresh in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
+ +For example, you can easily call pipeline B from pipeline A +with the following step: + +`codefresh.yml` of pipeline A +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myTriggerStep: + title: triggering another pipeline + image: codefresh/cli + commands: + - 'codefresh run -b=${{CF_BRANCH}}' -t + when: + condition: + all: + validateTargetBranch: '"${{CF_PULL_REQUEST_TARGET}}" == "production"' + validatePRAction: '''${{CF_PULL_REQUEST_ACTION}}'' == ''opened''' +{% endraw %} +{% endhighlight %} + +This step only calls pipeline B when a pull request is opened for the branch named `production`. + +Note that when you use the Codefresh CLI in a pipeline step, it is already configured, authenticated, and ready for use. +No additional authentication is required. + +## Related articles +[Codefresh API documentation](https://g.codefresh.io/api/){:target="\_blank"} +[Codefresh CLI documentation](https://codefresh-io.github.io/cli/){:target="\_blank"} + + diff --git a/_docs/integrations/codefresh-hosted-gitops.md b/_docs/integrations/codefresh-hosted-gitops.md new file mode 100644 index 000000000..14cbf9e2e --- /dev/null +++ b/_docs/integrations/codefresh-hosted-gitops.md @@ -0,0 +1,96 @@ +--- +title: "Hosted GitOps integration" +description: "Connect with our Hosted GitOps to leverage Managed Argo CD" +group: integrations +toc: true +--- + +Integrate Codefresh Classic with Codefresh's Hosted GitOps for deployments powered by managed Argo CD. +Use Codefresh Classic for pipelines, and Codefresh Hosted GitOps for deployments. + +Codefresh Hosted GitOps includes a dedicated report image step that both reports and enriches deployed images. Add the report image step in your Codefresh Classic pipeline and reference integrations with issue-tracking and container registry tools for Codefresh to retrieve and enrich image information. + +For a brief overview of what you get with Codefresh Hosted GitOps, read the next section. + +For information on how to connect Codefresh Classic to Codefresh Hosted GitOps, see [CI integration with Codefresh Classic](https://codefresh.io/docs/gitops-integrations/ci-integrations/codefresh-classic/){:target="\_blank"}. + +## Codefresh Hosted GitOps features + +### Hosted and hybrid runtimes +Codefresh Hosted GitOps is based on a hosted environemt, with the runtime hosted and managed by Codefresh. + +After the three-step process of provisioning your hosted runtime, Codefresh handles administration and maintenance of the hosted runtime, including version and security updates. + +{% include +image.html +lightbox="true" +file="/images/integrations/codefresh-hosted-gitops/hosted-runtime.png" +url="/images/integrations/codefresh-hosted-gitops/hosted-runtime.png" +caption="Provisioning a Hosted GitOps runtime" +alt="Provisioning a Hosted GitOps runtime" +max-width="70%" +%} + +### Dashboards for visibility and traceability + +A set of dashboards provides visibility into all aspects of deployment: + +* The Home dashboard presents enterprise-wide deployment highlights across runtimes and clusters. + Get insights into important KPIs and deployments, all in the same location. View status of runtimes and managed clusters, deployments, failed deployments with rollbacks, most active applications. Use filters to narrow the scope to focus on anything specific. 
+ + {% include +image.html +lightbox="true" +file="/images/integrations/codefresh-hosted-gitops/hosted-home-dashboard.png" +url="/images/integrations/codefresh-hosted-gitops/hosted-home-dashboard.png" +caption="Home dashboard in Hosted GitOps" +alt="Home dashboard in Hosted GitOps" +max-width="70%" +%} + +* The Applications dashboard displays applications, also across runtimes and clusters, from which you can select individual applications for further analysis. + Individual application information is grouped by current and historical deployments, enriched with Argo, Jira, and Git details, including rollout visualizations for ongoing deployments, and an interactive tree view of application resources. + +{% include +image.html +lightbox="true" +file="/images/integrations/codefresh-hosted-gitops/hosted-app-dashboard.png" +url="/images/integrations/codefresh-hosted-gitops/hosted-app-dashboard.png" +caption="Applications dashboard in Hosted GitOps" +alt="Applications dashboard in Hosted GitOps" +max-width="70%" +%} + + +* The DORA metrics dashboard in Codefresh helps quantify DevOps performance. Apart from the metrics themselves, the DORA dashboard in Codefresh has several unique features to pinpoint just which applications or runtimes are contributing to problematic metrics. + +{% include +image.html +lightbox="true" +file="/images/integrations/codefresh-hosted-gitops/hosted-dora-metrics.png" +url="/images/integrations/codefresh-hosted-gitops/hosted-dora-metrics.png" +caption="DORA metrics in Hosted GitOps" +alt="DORA metrics in Hosted GitOps" +max-width="60%" +%} + +### Application management + +Manage the application lifecycle in the Codefresh UI, from creating, editing, and deleting applications, to quick manual sync when needed. + + +### Third-party integrations for image enrichment +Add integrations to issue-tracking tools such as Jira, and container-registries such as Docker Hub, JFrog and more, to enrich images. + +{% include +image.html +lightbox="true" +file="/images/integrations/codefresh-hosted-gitops/hosted-int-tools.png" +url="/images/integrations/codefresh-hosted-gitops/hosted-int-tools.png" +caption="Integrations in Hosted GitOps" +alt="Integrations in Hosted GitOps" +max-width="60%" +%} + +## Related articles +[CI integrations with GitOps]({{site.baseurl}}/docs/gitops-integrations/ci-integrations) diff --git a/_docs/integrations/datadog.md b/_docs/integrations/datadog.md new file mode 100644 index 000000000..609752af2 --- /dev/null +++ b/_docs/integrations/datadog.md @@ -0,0 +1,133 @@ +--- +title: "Datadog integration" +description: "Integrate Codefresh pipelines with Datadog for monitoring and analysis" +group: integrations +toc: true +--- + +Datadog is a SaaS-based monitoring and analytics platform for large-scale applications and infrastructure. Integrating Datadog with Codefresh allows you to leverage Codefresh to create your pipelines, and Datadog to monitor and analyze them. + +When a pipeline completes execution in Codefresh, Codefresh reports pipeline-execution data to Datadog for viewing in Datadog's Continuous Integration (CI) Visibility interface. + +For Datadog and Codefresh integration, you need: +* An API token from your Datadog account +* To define the settings in Codefresh + +> Note: Please reach out to Support if you’re interested in enabling Datadog for your account. + +## Get API token from Datadog account +If you already have a Datadog account, you can copy the API key if you have one, or generate a new API key. + +1. Log in to your Datadog account. +1. 
Go to **Organization Settings**, and select **API Keys**. + + {% include image.html +lightbox="true" +file="/images/integrations/datadog/datadog-api-key.png" +url="/images/integrations/datadog/datadog-api-key.png" +max-width="30%" +caption="Getting an API Key from your Datadog account" +alt="Getting an API Key from your Datadog account" +%} + +{:start="3"} +1. Copy the API key to use with your Codefresh integration. + +## Set up Datadog integration in Codefresh + +Configure the integration settings for Datadog within Codefresh. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Datadog**, and then **Configure**. +1. Click **Add Integration**. + + {% include image.html +lightbox="true" +file="/images/integrations/datadog/datadog-config-settings.png" +url="/images/integrations/datadog/datadog-config-settings.png" +max-width="30%" +caption="Datadog configuration settings" +alt="Datadog configuration settings" +%} + +{:start="4"} +1. Define the following: + * **Datadog site**: Select the site with your data. If you are not sure which Datadog site to select, select the _View documentation_ link below the field, and read Datadog's official documentation. + * **Token**: The API token you copied from your Datadog account. +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + +## Pipeline data from Codefresh in Datadog +See pipeline data in Datadog's CI Visibility interface. + +We have highlighted the main features in Datadog for Codefresh pipelines. For detailed descriptions and options, see [Datadog documentation on Exploring Pipelines](https://docs.datadoghq.com/continuous_integration/explore_pipelines/){:target="\_blank"}. + + +### Pipelines page in Datadog + +The Pipelines page shows aggregated data for each pipeline, for the selected time range. You can see the failure rate and average build duration against the total number of executions of a pipeline, alongside the metrics from the most recent build of the same pipeline. + +Below is an example of the Pipelines page with Codefresh pipelines, prefixed by the Codefresh logo. + + {% include image.html +lightbox="true" +file="/images/integrations/datadog/datadog-pipelines-page.png" +url="/images/integrations/datadog/datadog-pipelines-page.png" +max-width="30%" +caption="Pipelines page in Datadog with Codefresh pipelines" +alt="Pipelines page in Datadog with Codefresh pipelines" +%} + +### Pipeline Details page in Datadog + +Selecting a pipeline takes you to the Pipeline Details page in Datadog which provides in-depth data for the pipeline. +Here you can see the failure rate and average build duration for the selected pipeline, and data on its branches, and jobs (referred to as steps in Codefresh). +You also have the option of viewing executions in the dedicated Pipeline Executions page. + +Below is an example of the Pipeline Details page for the selected Codefresh pipeline. 
+ + + {% include image.html +lightbox="true" +file="/images/integrations/datadog/datadog-pipeline-drilldown.png" +url="/images/integrations/datadog/datadog-pipeline-drilldown.png" +max-width="30%" +caption="Drilldown view for selected pipeline in Datadog Pipeline Details page" +alt="Drilldown view for selected pipeline in Datadog Pipeline Details page" +%} + +### Pipeline Executions page in Datadog + +The Pipeline Executions page shows day-by-day execution data for the selected pipeline or pipelines. + +Below is an example of the Pipeline Executions page with execution data for Codefresh pipelines. + + + {% include image.html +lightbox="true" +file="/images/integrations/datadog/datadog-pipeline-executions.png" +url="/images/integrations/datadog/datadog-pipeline-executions.png" +max-width="30%" +caption="Execution timeline view in Datadog Pipeline Executions page" +alt="Execution timeline view in Datadog Pipeline Executions page" +%} + +### Pipeline Dashboards page in Datadog + +Pipeline Dashboards is your go-to location for a quick look at performance and step metrics across pipelines. You can customize the widgets in the dashboard to display the data that is of interest to you. + +Below is an example of the Pipeline Dashboards page. + + + {% include image.html +lightbox="true" +file="/images/integrations/datadog/datadog-pipeline-dashboard.png" +url="/images/integrations/datadog/datadog-pipeline-dashboard.png" +max-width="30%" +caption="Pipelines Dashboards page in Datadog" +alt="Pipelines Dashboards in Datadog" +%} + +## Related articles +[Integration Tests]({{site.baseurl}}/docs/testing/integration-tests/) \ No newline at end of file diff --git a/_docs/integrations/docker-registries.md b/_docs/integrations/docker-registries.md new file mode 100644 index 000000000..271201060 --- /dev/null +++ b/_docs/integrations/docker-registries.md @@ -0,0 +1,141 @@ +--- +title: "Docker Registries for pipeline integrations" +description: "Connect your Docker Registry to Codefresh CI pipelines" +group: integrations +redirect_from: + - /docs/docker-registry/ + - /docs/docker-registries/external-docker-registries/ + - /docs/docker-registries/ + - /docs/codefresh-registry/ + - /docs/docker-registries/codefresh-registry/ +toc: true +--- +Codefresh enables you to integrate with several Docker container registries, including (but not limited to): + +* [Docker Hub](docker-hub) +* [Azure Container Registry](azure-docker-registry) +* [Google Container Registry](google-container-registry) +* [Google Artifact Registry](google-artifact-registry) +* [Amazon EC2 Container Registry](amazon-ec2-container-registry) +* [Bintray.io/Artifactory](bintray-io) +* [Quay.io](quay-io) +* [Github Container Registry](github-container-registry) + +For a different registry choose to configure using the [Other](other-registries) option. + +The registries can either be public or private. + +## General Configuration + + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. + +{% include image.html + lightbox="true" + file="/images/integrations/codefresh-integrations.png" + url="/images/integrations/codefresh-integrations.png" + alt="Codefresh Account Integration" + max-width="80%" %} + +{:start="4"} +1. From the **Add Registry Provider** drop-down, select the regsitry type to add. 
+ +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/add-docker-registry.png" + url="/images/integrations/docker-registries/add-docker-registry.png" + alt="Add Docker Registry" + max-width="45%" %} + +{:start="5"} +1. Each configuration must be given a unique name, which you can later reference in a codefresh.yml file. + + + +## Define fallback registry + +Codefresh has a feature that allows users to designate a fallback registry for Docker integrations. If a Codefresh pipeline attempts to pull an image and that image fails for any reason (authorization issue, the registry server is down, etc.), a retry mechanism will attempt to pull it successfully. If this mechanism fails, the fallback registry feature provides the opportunity to pull the image from a different registry you have specified. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. In the list of registries, select the registry to configure as the fallback registry, and click **Edit**. +1. Expand **Advanced Options**, and select the registry from the **Fallback Registry** list. + You can also specify a fallback registry when creating a new integration as long as another integration exists. + +## Using an optional repository prefix + +For each supported Registry, define a prefix string for your Docker images to be used globally. + +This is handy for registries that require a prefix (usually the name of an organization or repository) as you can set it once, instead of having each pipeline using the prefix by itself. + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/repository-prefix.png" + url="/images/integrations/docker-registries/repository-prefix.png" + alt="Setting a Registry prefix" + caption="Setting a Registry prefix" + max-width="60%" + %} + +See more details at [pushing Docker images]({{site.baseurl}}/docs/#pushing-docker-images). + +## Pushing an image + +Once your registry configuration is all set up you can start pushing your images to it. + +Within a [push step]({{site.baseurl}}/docs/pipelines/steps/push/), add your registry configuration name in the `registry` field + + `codefresh.yml` +{% highlight yaml %} +push_step: + type: push + description: Free text description + candidate: {% raw %}${{build_step}}{% endraw %} + tag: {% raw %}${{CF_BRANCH}}{% endraw %} + registry: your-registry-configuration-name +{% endhighlight %} + +For more details, see the [example for image push]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/). + +## Internal caching registry + +You can also select a single registry that will serve as your [caching registry]({{site.baseurl}}/docs/pipelines/pipeline-caching/#docker-registry-caching). + +> You cannot select Dockerhub as a caching registry, because it has very strict requirements for naming images, and our caching mechanism needs capabilities which are not possible with Dockerhub. 
+ +Codefresh uses that registry efficiently to perform advanced caching logic for your builds by automatically: + +* Checking the stored metadata to decide which past image is most relevant for caching purposes +* Pulling images from this registry for caching purposes +* Using that registry for distributed Docker layer caching to make your Docker builds faster + +We give you the ability to define a separate registry for caching purposes for the following scenarios: + +1. You don't want extra traffic to be sent to your main deployment registry. Maybe you want to avoid bandwidth/storage limits in your production registry +1. You have lots of build steps in pipelines with intermediate docker images that you are certain you don't need outside of the pipeline itself. In that case you can use the `disable_push` property in those pipelines. +1. You have speed concerns regarding image pulling/pushing. For example your development team is in Europe, but your production servers are in the USA. You would probably choose a caching registry in a European region (so that developers get the best experience), where your main registry is in the USA (close to your production servers) + +Therefore, in most cases you should make your main registry your caching registry as well. For extra control, you can either define a different caching registry or disable selectively automatic pushes with the `disable_push` property. + +>Notice that the dynamic image feature of Codefresh (creating docker images on demand in the same pipeline that is using them) will always work regardless of a caching registry. + +## Default registry + +If you define more than one registry, you can select a registry as the default one. Codefresh uses the default registry in both [build]({{site.baseurl}}/docs/pipelines/steps/build/) and [push]({{site.baseurl}}/docs/pipelines/steps/push/) steps if they don't already include a `registry` property. + +> Successful build steps always push to the default Codefresh registry, unless you also define the `disable_push` property. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the context menu of the Docker registry integration to be used as the default registry, select **Set as default**. + + + +## Related articles +[Examples of pushing Docker images]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/) diff --git a/_docs/integrations/docker-registries/amazon-ec2-container-registry.md b/_docs/integrations/docker-registries/amazon-ec2-container-registry.md new file mode 100644 index 000000000..8e534868a --- /dev/null +++ b/_docs/integrations/docker-registries/amazon-ec2-container-registry.md @@ -0,0 +1,179 @@ +--- +title: "Amazon EC2 Container Registry" +description: "Use the Amazon Docker Registry for pipeline integrations" +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/aws/ + - /docs/docker-registries/external-docker-registries/amazon-ec2-container-registry/ +toc: true +--- + +## Set up ECR integration for IAM user + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. 
From the **Add Registry Provider** dropdown, select **Amazon ECR**.
+1. Define the following:
+  * **Registry name**: A unique name for this configuration.
+  * **Region**: The AWS region of the registry.
+  * **Access Key ID**: Your AWS accessKeyId.
+  * **Secret Access Key**: Your AWS secretAccessKey.
+
+{% include image.html 
+  lightbox="true" 
+  file="/images/integrations/docker-registries/add-amazon-ecr-registry.png" 
+  url="/images/integrations/docker-registries/add-amazon-ecr-registry.png" 
+  alt="Amazon EC2 Container Registry settings" 
+  caption="Amazon EC2 Container Registry settings" 
+  max-width="60%" %}
+
+{:start="5"}
+1. To verify the connection details, click **Test Connection**.
+1. To apply the changes, click **Save**.
+
+Codefresh automatically refreshes the AWS token for you.
+
+For more information on how to obtain the required tokens, read the [AWS documentation](http://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys){:target="_blank"}.
+
+> Note:
+  You must have an active registry set up in AWS.

+  Amazon ECR push/pull operations are supported with two permission options: user-based and resource-based.
+
+
+  * User-based permissions: The user account must have the `AmazonEC2ContainerRegistryPowerUser` policy attached (or a custom policy based on it).
+    For more information and examples, click [here](http://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html){:target="_blank"}.
+  * Resource-based permissions: Users with resource-based permissions must be allowed to call `ecr:GetAuthorizationToken` before they can authenticate to a registry and push or pull images; you then need to grant push/pull permissions on the specific repository.
+    For more information and examples, click [here](http://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicies.html){:target="_blank"}.
+
+
+## Set up ECR integration for service account
+
+Setting up ECR integration for a service account applies to accounts with the Codefresh Runner installation.
+
+### Kubernetes service account setup
+To use an IAM role, you must set up a Kubernetes service account, as described in the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html){:target="\_blank"}.
+You can define the service account at four different levels. The levels are listed below in ascending order of priority:
+
+* Runtime
+  The runtime level has the lowest priority. Define it in the Runtime Specification under `runtimeScheduler > cluster` (same level as `namespace`), in the `serviceAccount` key. Use the default service account and make sure the correct annotation is added to it, or create a new service account with the proper permissions and annotations.
+
+```yaml
+runtimeScheduler:
+  cluster:
+    namespace: codefresh
+    clusterProvider:
+      accountId: 5c1658d1736122ee1114c842
+      selector: docker-desktop
+    serviceAccount: codefresh-engine
+```
+
+* Account
+  The account-level service account has higher priority than the runtime-level service account. To define the service account at the account level, turn on the setting as part of the integration, as described below.
+
+* Pipeline
+  The pipeline-level service account has higher priority than the account-level service account. Define the service account as part of the pipeline's runtime settings (**Pipeline > Settings > Runtime**).
+
+* Trigger
+  The trigger-level service account has the highest priority. Define the service account as part of the trigger settings for the specific pipeline (**Workflow > Triggers (modify or add) > Advanced Options**).
+
+
+### How to
+
+**Before you begin**
+* Define a Kubernetes service account for the runtime, account, pipeline, or pipeline-trigger
+
+**How to**
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select **Docker Registries** and then click **Configure**.
+1. From the **Add Registry Provider** dropdown, select **Amazon ECR**.
+1. Do the following:
+  * **Registry name**: Enter a unique name for this configuration.
+  * **Region**: Select the AWS region.
+  * Select **Resolve credentials from service account**.
+
+  The Access Key ID and Secret Access Key fields are disabled. 
+
+{% include image.html 
+  lightbox="true" 
+  file="/images/integrations/docker-registries/add-amazon-ecr-registry.png" 
+  url="/images/integrations/docker-registries/add-amazon-ecr-registry.png" 
+  alt="Amazon EC2 Container Registry settings" 
+  caption="Amazon EC2 Container Registry settings" 
+  max-width="60%" %}
+
+{:start="5"}
+1. To verify the connection details, click **Test Connection**.
+1. To apply the changes, click **Save**.
+
+
+
+## Pushing Docker images to Amazon ECR
+
+There are two ways to push images:
+
+1. (Recommended) Using the YAML [push step]({{site.baseurl}}/docs/pipelines/steps/push/), as shown in the sketch at the end of this article.
+1. Manually promoting an image (described below).
+
+For more details on how to push a Docker image in a pipeline, see the [build and push example]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/).
+
+
+
+### Manually promoting an image
+
+
+
+The **Images** view has an option to manually push images to a registry.
+Specify the repository name exactly as it is set in ECR, as in the example below.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/docker-registries/ecr/ecr-manual-promote-repo-name.png"
+url="/images/integrations/docker-registries/ecr/ecr-manual-promote-repo-name.png"
+alt="Repository name in ECR"
+caption="Repository name in ECR"
+max-width="40%"
+%}
+
+1. In the Codefresh UI, from Artifacts in the sidebar, select [**Images**](https://g.codefresh.io/2.0/images){:target="\_blank"}.
+1. Click **Promote**.
+
+  {% include image.html
+lightbox="true"
+file="/images/integrations/docker-registries/ecr/ecr-manual-promote-button.png"
+url="/images/integrations/docker-registries/ecr/ecr-manual-promote-button.png"
+alt="Promote image icon"
+caption="Promote image icon"
+max-width="40%"
+%}
+
+{:start="3"}
+1. Do the following:
+  * Enter the **Repository Name**.
+  * Enter the **Tag**: the text after the `:` in the repository name. For example, in `repository-name:tag`, the tag is `tag`.
+  * From the **Registry** dropdown, select your ECR configuration.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/docker-registries/ecr/ecr-manual-promote-settings.png"
+url="/images/integrations/docker-registries/ecr/ecr-manual-promote-settings.png"
+alt="Image promotion settings"
+caption="Image promotion settings"
+max-width="40%"
+%}
+
+{:start="4"}
+1. Click **Promote**.
+
+
+>It is possible to change the image name if you want, but make sure that the new name exists as a repository in ECR. 
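+For the recommended YAML approach, a minimal push step referencing this integration might look like the sketch below. The integration name `amazon-ecr`, the build step name `build_step`, and the image name are assumptions for illustration; `image_name` must match a repository that already exists in ECR.
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+push_to_ecr:
+  title: Pushing image to Amazon ECR
+  type: push
+  candidate: ${{build_step}}           # the build step that created the image
+  tag: ${{CF_BRANCH_TAG_NORMALIZED}}
+  image_name: my-app                   # must match an existing ECR repository
+  registry: amazon-ecr                 # the Registry name of this integration
+{% endraw %}
+{% endhighlight %}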
+
+
+## Related articles
+[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries)
+[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/)
+[Push step]({{site.baseurl}}/docs/pipelines/steps/push/)
+[Building and pushing an image]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/)
+
diff --git a/_docs/integrations/docker-registries/azure-docker-registry.md b/_docs/integrations/docker-registries/azure-docker-registry.md
new file mode 100644
index 000000000..e19f41f81
--- /dev/null
+++ b/_docs/integrations/docker-registries/azure-docker-registry.md
@@ -0,0 +1,73 @@
+---
+title: "Azure Docker registry"
+description: "Use the Azure Docker Registry for pipeline integrations"
+group: integrations
+sub_group: docker-registries
+redirect_from:
+  - /docs/docker-registries/external-docker-registries/azure-docker-registry/
+toc: true
+---
+Configure the [Azure Docker registry](https://docs.microsoft.com/en-us/azure/container-registry/){:target="\_blank"} for pipeline integrations.
+
+## Configure Azure portal
+
+1. Log in to the Azure Portal.
+1. Click **Settings** and from the sidebar, select **Access Keys**.
+
+  {% include 
+image.html 
+lightbox="true" 
+file="/images/integrations/docker-registries/azure-registry-admin.png" 
+url="/images/integrations/docker-registries/azure-registry-admin.png" 
+alt="Docker credentials for the Azure registry" 
+caption="Docker credentials for the Azure registry" 
+max-width="80%" 
+%}
+
+{:start="3"}
+1. For **Admin user**, click **Enable**.
+1. Change the username (optional), and make sure that you note down one of the passwords shown on the screen.
+
+## Configure Azure Docker registry settings in Codefresh
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select **Docker Registries** and then click **Configure**.
+1. From the **Add Registry Provider** dropdown, select **Other Registries**.
+1. Define the following:
+  * **Registry Name**: A unique name for this configuration.
+  * **Username**: Your Azure Registry username.
+  * **Password**: Your Azure Registry password.
+  * **Domain**: `<your-registry-name>.azurecr.io`.
+
+{% include image.html 
+  lightbox="true" 
+file="/images/integrations/docker-registries/add-azure-registry.png" 
+url="/images/integrations/docker-registries/add-azure-registry.png" 
+alt="Adding the Azure Docker registry" 
+caption="Adding the Azure Docker registry" 
+max-width="60%" %}
+
+{:start="5"}
+1. To verify the connection details, click **Test connection**.
+1. To apply the changes, click **Save**.
+
+## Using the Azure Registry
+
+You can now use the Azure Registry in your CI pipelines, either via the UI or through the YAML [push step]({{site.baseurl}}/docs/pipelines/steps/push/) (recommended). 
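+For example, a minimal push step that uses this integration might look like the sketch below; the integration name `azure-registry` and the build step name `build_step` are assumptions, so substitute the Registry Name you defined above and your own build step:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+push_to_azure:
+  type: push
+  title: Pushing image to Azure Container Registry
+  candidate: ${{build_step}}      # the build step that created the image
+  tag: ${{CF_BRANCH}}
+  registry: azure-registry        # the Registry Name of this integration
+{% endraw %}
+{% endhighlight %}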
+ +It is also possible to use the registry from the command line with: + +``` +docker login .azurecr.io -u -p +``` + +You can also inspect the pushed images either using Azure portal or with [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/?view=azure-cli-latest){:target="\_blank"} + +``` +az acr repository list --name --output table +``` + + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Building and pushing an image]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/) \ No newline at end of file diff --git a/_docs/integrations/docker-registries/bintray-io.md b/_docs/integrations/docker-registries/bintray-io.md new file mode 100644 index 000000000..7b158628e --- /dev/null +++ b/_docs/integrations/docker-registries/bintray-io.md @@ -0,0 +1,91 @@ +--- +title: "Bintray.io/Artifactory" +description: "Use JFrog Bintray/Artifactory with pipeline integrations " +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/bitrayio/ + - /docs/docker-registries/external-docker-registries/bintray-io/ +toc: true +--- + +Configure JFrog Bintray/Artifactory as your Docker registry provider. +You need to get the API key for your profile, and the correct registry domain. + +>Passing Codefresh metadata to Bintray is supported through Grafeas. More info is available [in this blogpost](https://codefresh.io/blog/write-this-down-grafeas/){:target="_blank"}. + +## Set up Bintray integration + +**Before you begin** +* [Get your API key](#find-your-api-key) +* [Get your regsitry domain](#find-your-registry-domain) + +**How to** + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **JFrog Bintray**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Username**: Your Bintray.io/Artifactory username. + * **API key**: The Bintray.io/Artifactory API key you retrieved from your profile. + * **Domain**: Your Bintray.io registry address, for example, `docker-new-repository.bintray.io`, or Artifactory registry address, for example `my-company-docker-snapshot.jfrog.io`. + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/add-bintray-registry.png" + url="/images/integrations/docker-registries/add-bintray-registry.png" + alt="JFrog Bintray registry settings" + caption="JFrog Bintray registry settings" + max-width="70%" %} + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + +## Getting Bintray.io settings + +To obtain Bintray.io information, follow the steps. + +### Find your API key + +1. Go to your Bitray.io profile. +1. Select **API Key** from the side menu. + +{% include image.html +lightbox="true" +file="/images/integrations/docker-registries/bintray/bintray-api-key.png" +url="/images/integrations/docker-registries/bintray/bintray-api-key.png" +alt="Bintray.io API key" +caption="Bintray.io API key" +max-width="60%" %} + +### Find your registry domain + +1. Navigate to your bintray.com repository, or add a new one. +1. Click **SET ME UP!**. 
+{% include image.html lightbox="true" file="/images/integrations/docker-registries/bintray/bintray-set-me-up.png" url="/images/integrations/docker-registries/bintray/bintray-set-me-up.png" alt="Bintray.io SET ME UP" caption="Bintray.io SET ME UP" max-width="45%" %} + +{:start="3"} +1. Copy the registry address. +{% include image.html lightbox="true" file="/images/integrations/docker-registries/bintray/bintray-domain.png" url="/images/integrations/docker-registries/bintray/bintray-domain.png" alt="Bintray.io registry address" caption="Bintray.io registry address" max-width="45%" %} + +### Basic metadata upload + +Codefresh automatically sets some version attributes in Bintray every time you upload a Docker image. + +{% + include image.html lightbox="true" + file="/images/integrations/docker-registries/bintray/bintray-metadata.png" + url="/images/integrations/docker-registries/bintray/bintray-metadata.png" + alt="Basic Bintray metadata" + caption="Basic Bintray metadata" + max-width="50%" + %} + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) \ No newline at end of file diff --git a/_docs/integrations/docker-registries/digital-ocean-container-registry.md b/_docs/integrations/docker-registries/digital-ocean-container-registry.md new file mode 100644 index 000000000..b36283a48 --- /dev/null +++ b/_docs/integrations/docker-registries/digital-ocean-container-registry.md @@ -0,0 +1,148 @@ +--- +title: "DigitalOcean Container Registry" +description: "Push Docker images to your DigitalOcean Container Registry with pipeline integration" +group: integrations +sub_group: docker-registries +toc: true +--- + +You can configure [DigitalOcean Container Registry](https://www.digitalocean.com/products/container-registry/){:target="\_blank"} as your Docker Registry, and use it in your Codefresh pipeline. + + +The DigitalOcean Container Registry is directly integrated into your DigitalOcean Dashboard. While it is optional to use the DigitalOcean Container registry with your DigitalOcean Kubernetes cluster, it allows for easier integration between resources. + +The next sections will look at: +1. Creating the DigitalOcean Container Registry +2. Generating a DigitalOcean Access token +3. Adding the DigitalOcean Container Registry to our Docker Registry in Codefresh +4. Modifying the Build step in our Codefresh pipeline +5. Viewing the built image in the DigitalOcean Container Registry + +## Building and pushing a container image with DigitalOcean and Codefresh + +Building and pushing a container image with DigitalOcean and Codefresh, requires: +* A DigitalOcean account (your GitHub username) +* A DigitalOcean access token +* An application with a Dockerfile to build images + +### Creating the DigitalOcean Container Registry + +Once you are logged into your DigitalOcean Account, open the `Container Registry` tap and provide the name of your registry. Note that the name has to be unique. 
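+
+If you prefer the command line, the registry can also be created with the DigitalOcean CLI; this is only a sketch, and assumes that `doctl` is installed and authenticated, with `my-registry` as a placeholder name:
+
+```
+doctl registry create my-registry
+```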
+ +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/digital-ocean/create-registry.png" + url="/images/integrations/docker-registries/digital-ocean/create-registry.png" + alt="Create Container Registry in DigitalOcean" + caption="Create Container Registry in DigitalOcean" + max-width="100%" +%} + +### Creating an access token + +Now that we are already in DigitalOcean, we have to create an [access token](https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/){:target="\_blank"}. Note that it requires read and write access. +Select **API** at the bottom right of your left-side menu under **Tokens/Keys**. +Copy and paste the token somewhere secure and where you will find it again. + +## Set up DigitalOcean Container Registry integration + + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Other Registries**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Username**: The DigitalOcean access token you created. + * **Password**: The DigitalOcean access token you created. + * **Domain**: `registry.digitalocean.com`. + + Optional, you can add your registry name to the advanced settings section. For instance, if you named it in Digital Ocean "anais-codefresh", you can ensure that every time the registry is used, it is automatically referenced in the build step of your pipeline. + + + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + + +### Modify your build step + +Within your Codefresh YAML file, modify the build step to push to the DigitalOcean Container Registry. If you set the DigitalOcean Container Registry as default registry, note that you do not have to specify the Registry. + +Add the following line to your build step: +`registry: "digital-ocean"` + +This is an example of the complete build step: + +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "anais-codefresh/react-article-display" + # CF_BRANCH value is auto set when pipeline is triggered + # Learn more at codefresh.io/docs/docs/codefresh-yaml/variables/ + revision: "${{CF_BRANCH}}" + git: "github" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "anais-codefresh/react-article-display-do-registry" + tags: + - "1.0.0" + working_directory: "${{clone}}" + dockerfile: "Dockerfile" + stage: "build" + registry: "digital-ocean" +{% endraw %} +{% endhighlight %} + +Note that Codefresh builds AND pushes images both in the same step. + +### Running the pipeline and viewing the image in the DigitalOcean Container Registry + +Once you have modified the step, save and run your pipeline. Below is an example of the pipeline in it's simplest form. 
+ +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/digital-ocean/codefresh-pipeline.png" + url="/images/integrations/docker-registries/digital-ocean/codefresh-pipeline.png" + alt="Codefresh Pipeline" + caption="Codefresh Pipeline" + max-width="100%" +%} + +You can then view the image in the DigitalOcean Container Registry: + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/digital-ocean/container-registry-do.png" + url="/images/integrations/docker-registries/digital-ocean/container-registry-do.png" + alt="DigitalOcean Container Registry" + caption="DigitalOcean Container Registry" + max-width="100%" +%} + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Building Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) + diff --git a/_docs/integrations/docker-registries/docker-hub.md b/_docs/integrations/docker-registries/docker-hub.md new file mode 100644 index 000000000..46a71832f --- /dev/null +++ b/_docs/integrations/docker-registries/docker-hub.md @@ -0,0 +1,123 @@ +--- +title: "Docker Hub" +description: "Use DockerHub for pipeline integration" +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/dockerhub/ + - /docs/docker-registries/external-docker-registries/docker-hub/ +toc: true +--- +Configure Docker Hub as a Docker registry for CI pipelines to push images to it. + +1.[ Select **Docker Hub** as the registry provider]({{site.baseurl}}/docs/integrations/docker-registries/#general-configuration). +1. Define the following: + * Registry Name: A unique name for this configuration. + * Username: Docker Hub username. + * Password: Docker Hub [personal account token](https://docs.docker.com/docker-hub/access-tokens/){:target="\_blank"}, or Dockerhub account password (not recommended). + >If you have enabled [two-factor-authentication in Docker Hub](https://docs.docker.com/docker-hub/2fa/){:target="\_blank"}, then in the Password field, paste a Docker personal access token (instead of your Docker Hub master password). Otherwise, Codefresh will not be able to push your image. + If you don't have 2FA enabled in Dockerhub, then you can also use your Dockerhub account password. But in all cases we suggest you create a personal access token for Codefresh (personal access tokens are more secure as you can revoke them on demand and see when they were last used). + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/dockerhub/add-dockerhub-registry.png" + url="/images/integrations/docker-registries/dockerhub/add-dockerhub-registry.png" + alt="Add Docker Hub registry" + caption="Add Docker Hub registry" + max-width="50%" +%} + + +>Docker.io only allows you to push images that are tagged with your username. If you have a choice, create +a Docker Hub account with the same username that you have in Codefresh. If not, you need to change the Docker image +created to match your username in every [push step]({{site.baseurl}}/docs/pipelines/steps/push/#examples). + + +## Adding more Docker Hub integrations + +You can add additional Docker Hub accounts using the same process. 
+ + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/dockerhub/two-dockerhub-integrations.png" + url="/images/integrations/docker-registries/dockerhub/two-dockerhub-integrations.png" + alt="Additional Docker Hub integrations" + caption="Additional Docker Hub integrations" + max-width="80%" +%} + + +You can specify which registry will be used as primary/default for the `docker.io` domain. +Use the appropriate `registry name` value in your pipelines in order to decide which Dockerhub account will be used. + +Here is a pipeline that pushes to two different Docker Hub accounts: + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/dockerhub/pushing-two-dockerhub-accounts.png" + url="/images/integrations/docker-registries/dockerhub/pushing-two-dockerhub-accounts.png" + alt="Pushing to multiple Dockerhub accounts" + caption="Pushing to multiple Dockerhub accounts" + max-width="90%" +%} + +This is the [definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) for the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "push" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "kostis-codefresh/trivial-go-web" + revision: "master" + stage: "clone" + build: + title: "Building Docker image" + type: "build" + image_name: "trivial-go-web" + working_directory: "${{clone}}" + tag: "latest" + dockerfile: "Dockerfile.multistage" + stage: "build" + disable_push: true + push1: + title: "Pushing 1st Docker image" + type: push + image_name: "kostiscodefresh/trivial-go-web" + tag: "latest" + stage: "push" + registry: dockerhub + candidate: ${{build}} + push2: + title: "Pushing 2nd Docker image" + type: push + image_name: "kkapelon/trivial-go-web" + tag: "latest" + stage: "push" + registry: second-dockerhub + candidate: ${{build}} +{% endraw %} +{% endhighlight %} + +The two Dockerhub accounts are `kkapelon` and `kostiscodefresh`, and Codefresh automatically uses the correct integration by looking at the `image_name` property of the push step. + + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) + + + + + diff --git a/_docs/integrations/docker-registries/github-container-registry.md b/_docs/integrations/docker-registries/github-container-registry.md new file mode 100644 index 000000000..7d2ff3dd4 --- /dev/null +++ b/_docs/integrations/docker-registries/github-container-registry.md @@ -0,0 +1,186 @@ +--- +title: "GitHub Container Registry" +description: "Push Docker images to GitHub Container Registry with pipeline integrations" +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/integrations/docker-registries/github-packages/ +toc: true +--- + +Configure [GitHub Container Registry](https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry){:target="\_blank"} as your Docker Registry, and use it in your Codefresh pipeline. + + +The GitHub Container Registry allows you to host and manage Docker container images in your personal or organisation account on GitHub. 
One of the benefits is that permissions can be defined for the Docker image, independent of any repository. Thus, your repository could be private and your Docker image public. +See GitHub documentation for more [information on permissions](https://docs.github.com/en/free-pro-team@latest/packages/managing-container-images-with-github-container-registry/configuring-access-control-and-visibility-for-container-images){:target="\_blank"}. + +You can use the GitHub Container Registry manually or automate the process by connecting the registry to your Codefresh pipeline. + + +## Using the GitHub Container Registry + +You will need the following +* A GitHub account with your GitHub username +* A personal access token +* The Docker image you want to push or use in your Codefresh pipeline + +### Create a personal token + +The username to the registry is the same as your GitHub username. +For the password you need to [create a GitHub personal token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token){:target="\_blank"}. + +When creating the personal token, you need to select at least the following scopes: + +* `write:packages` +* `read:packages` +* `delete:packages` +* `repo` if your repository is private; if public, do not select + +Once you create the token, note it down. + +You can make sure that your token is valid by using it as a password on your local workstation with the Docker command: + +``` +docker login ghcr.io --username github-account +[Paste your GitHub token on this prompt] +``` + +**Important** Make sure that the URL is correct, otherwise, you will receive login errors later on. The github-account is your GitHub username. + +### Tag and push your Docker image + +After you are logged in, you can now tag and push your Docker image to the GitHub Container Registry. The first method is the manual setup with the Docker CLI, and the second one uses Codefresh to automate the process. + +Use the following command to tag your Docker image: + +``` +docker tag image-id ghcr.io/github-account/image-name:image-version +``` + +For example: + +``` +docker tag 5e369524eecb ghcr.io/anais-codefresh/react-example:1.0 +``` + +You can find your image-id by running: + +``` +docker images +``` + +Once pushed, you will see the Docker image in the packages section of your repository. +If you want, you can connect the Docker image to a repository [using the GitHub interface](https://docs.github.com/en/free-pro-team@latest/packages/managing-container-images-with-github-container-registry/connecting-a-repository-to-a-container-image) or by adding a label to your Dockerfile. + +``` +LABEL org.opencontainers.image.source https://github.com/OWNER/REPO +``` + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/github/manual-docker-push.png" + url="/images/integrations/docker-registries/github/manual-docker-push.png" + alt="Pushing a Docker image manually to GitHub packages" + caption="Pushing a Docker image manually to GitHub packages" + max-width="100%" +%} + +Now that you have verified your token, we can connect the registry to Codefresh. + +## Set up GitHub Container Registry integration + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. 
From the **Add Registry Provider** dropdown, select **Other Registries**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Username**: Your GitHub username. + * **Password**: Your GitHub personal token. + * **Domain**: `ghcr.io`. + * Expand **Advanced Options** and define the [**Repository Prefix**]({{site.baseurl}}/docs/integrations/docker-registries/#using-an-optional-repository-prefix) as your GitHub username. + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/github/github-registry-codefresh.png" + url="/images/integrations/docker-registries/github/github-registry-codefresh.png" + alt="GitHub Container Registry settings" + caption="GitHub Container Registry settings" + max-width="70%" +%} + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + + +## Pushing Docker image to registry + +With the registry integration in place, you can now push a Docker image in any Codefresh pipeline, simply by mentioning the registry by name (`github-container-registry` in the example). + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/github/github-registry-pipeline.png" + url="/images/integrations/docker-registries/github/github-registry-pipeline.png" + alt="Codefresh pipeline for GitHub packages" + caption="Codefresh pipeline for GitHub packages" + max-width="100%" +%} + +Here is the definition of the Codefresh pipeline. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - "clone" + - "build" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "anais-codefresh/react-article-display" + revision: "${{CF_BRANCH}}" + git: "github" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "react-article-display" + working_directory: "${{clone}}" + tags: + - "${{CF_BRANCH_TAG_NORMALIZED}}" + - "2.0.0" + dockerfile: "Dockerfile" + stage: "build" + registry: "github-container-registry" +{% endraw %} +{% endhighlight %} + +Notice: + +* The `registry: github-container-registry` property in the `build` step, which is the name of the registry that you set-up in the previous step +* The fact that we push multiple Docker tags in a single step; you can define all tags in the `build` step. + +After the pipeline has finished the Docker tags can also be seen in the GitHub packages section of the repository. + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/github/multiple-docker-tags.png" + url="/images/integrations/docker-registries/github/multiple-docker-tags.png" + alt="Pushing different Docker tags" + caption="Pushing different Docker tags" + max-width="100%" +%} + +You can now treat this registry like any other Codefresh registry. 
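+
+For example, a standalone push step can reference the same integration by name. This is only a sketch; it assumes the repository prefix is configured as described above, and reuses the image name and build step from the pipeline shown earlier:
+
+{% highlight yaml %}
+{% raw %}
+  push_to_ghcr:
+    title: "Pushing to GitHub Container Registry"
+    type: "push"
+    candidate: ${{build}}
+    image_name: "react-article-display"
+    tag: "2.0.0"
+    registry: "github-container-registry"
+{% endraw %}
+{% endhighlight %}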
+ + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) diff --git a/_docs/integrations/docker-registries/google-artifact-registry.md b/_docs/integrations/docker-registries/google-artifact-registry.md new file mode 100644 index 000000000..fd9e36620 --- /dev/null +++ b/_docs/integrations/docker-registries/google-artifact-registry.md @@ -0,0 +1,64 @@ +--- +title: "Google Artifact Registry (GCAR)" +description: "Use Google Artifact Registry with pipeline integrations" +group: integrations +sub_group: docker-registries +toc: true +--- + +Configure GCAR (Google Artifact Registry) as your Docker registry provider. + +## Set up GCAR integration + +**Before you begin** +* [Generate a JSON key file](#generate-a-json-key-file) + +**How to** + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Google Artifact Registry**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Location**: Select the location. + * **JSON Keyfile**: The content of the generated JSON key file. + + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/google-artifact-registry-settings.png" + url="/images/integrations/docker-registries/google-artifact-registry-settings.png" + alt="Google Artifact Registry (GCAR) settings" + caption="Google Artifact Registry (GCAR) settings" + max-width="60%" %} + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + +## Generate a JSON key file +The JSON key file holds your credentials for a given [service account](https://cloud.google.com/compute/docs/access/service-accounts){:target="\_blank"}. +To generate your key file follow these instructions: + +1. Go to your [Cloud Platform Console Credentials page](https://console.cloud.google.com/apis/credentials){:target="\_blank"}. +1. Select the project that you're creating credentials for. +1. To set up a new service account, click **Create credentials**, and then select Service account key. +1. Choose the service account to use for the key. +1. Choose to download the service account's public/private key as a JSON file. + +You can find the complete guide [here](https://support.google.com/cloud/answer/6158849#serviceaccounts){:target="\_blank"}. + +## Working with multiple projects + +If you have more than one repository/project in Google cloud, you can connect multiple GCR registries and define one as the "primary" for the `gcr.io` domain. + +This means that every time Codefresh needs to pull an image it will use that integration. If you wish to use another project for pulling images, +you can use the `registry_context` property as described in [working with multiple registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain). 
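+
+As an illustration only, a freestyle step that pulls its image through a non-default integration could look like the sketch below; the integration name `my-second-project` and the image path are placeholders, and the authoritative syntax is in the guide linked above:
+
+{% highlight yaml %}
+{% raw %}
+  run_tests:
+    title: "Running tests"
+    image: "us-central1-docker.pkg.dev/my-project/my-repo/my-image:latest"
+    registry_context: my-second-project
+    commands:
+      - npm test
+{% endraw %}
+{% endhighlight %}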
+ + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) \ No newline at end of file diff --git a/_docs/integrations/docker-registries/google-container-registry.md b/_docs/integrations/docker-registries/google-container-registry.md new file mode 100644 index 000000000..e87548886 --- /dev/null +++ b/_docs/integrations/docker-registries/google-container-registry.md @@ -0,0 +1,66 @@ +--- +title: "Google Container Registry (GCR)" +description: "Use GCR with pipeline integrations" +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/google-cloud-registry/ + - /docs/docker-registries/external-docker-registries/google-container-registry/ +toc: true +--- +Configure GCR (Google Container Registry) as your Docker registry provider. + +## Set up GCR integration + +**Before you begin** +* [Generate a JSON key file](#generate-a-json-key-file) + +**How to** + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Google Container Registry**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Domain**: Select the domain. + * **JSON Keyfile**: The content of the generated JSON key file. + + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/google-gcr-registry-settings.png" + url="/images/integrations/docker-registries/google-gcr-registry-settings.png" + alt="Google Container Registry (GCR) settings" + caption="Google Container Registry (GCR) settings" + max-width="60%" %}` + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + +## Generate a JSON key file +The JSON key file holds your credentials for a given [service account](https://cloud.google.com/compute/docs/access/service-accounts){:target="\_blank"}. +To generate your key file follow these instructions: + +1. Go to your [Cloud Platform Console Credentials page](https://console.cloud.google.com/apis/credentials){:target="\_blank"}. +1. Select the project that you're creating credentials for. +1. To set up a new service account, click **Create credentials**, and then select Service account key. +1. Choose the service account to use for the key. +1. Choose to download the service account's public/private key as a JSON file. + +You can find the complete guide [here](https://support.google.com/cloud/answer/6158849#serviceaccounts){:target="\_blank"}. + +## Working with multiple projects + +If you have more than one repository/project in Google cloud, you can connect multiple GCR registries and define one as the "primary" for the `gcr.io` domain. + +This means that every time Codefresh needs to pull an image it will use that integration. 
If you wish to use another project for pulling images, +you can use the `registry_context` property as described in [working with multiple registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain). + + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) \ No newline at end of file diff --git a/_docs/integrations/docker-registries/other-registries.md b/_docs/integrations/docker-registries/other-registries.md new file mode 100644 index 000000000..785cba530 --- /dev/null +++ b/_docs/integrations/docker-registries/other-registries.md @@ -0,0 +1,61 @@ +--- +title: "Other Registries" +description: "Connect any Docker registry for pipeline integration" +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/other-registries/ + - /docs/docker-registries/external-docker-registries/other-registries/ +toc: true +--- +Codefresh provides an option to configure a Docker Registry not in the list of Docker registry providers. +Use this option for any cloud or hosted registry that follows the V2 Docker registry protocol. + +Some examples of self-hosted registries are: +* The [official registry](https://github.com/docker/distribution){:target="\_blank"} by Docker +* [Nexus](https://www.sonatype.com/nexus-repository-sonatype){:target="\_blank"} by Sonatype +* [Harbor](https://goharbor.io/){:target="\_blank"} by VMware +* [Portus](http://port.us.org/){:target="\_blank"} by Suse +* [Container Registry](https://www.alibabacloud.com/product/container-registry){:target="\_blank"} by Alibaba +* [Openshift registry](https://www.openshift.com/){:target="\_blank"} by Redhat +* [Kraken](https://github.com/uber/kraken){:target="\_blank"} by Uber +* [Proget](https://inedo.com/proget){:target="\_blank"} by Inedo + +## Set up Other Registry integration + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Other Registries**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Username**: Your registry username.. + * **Password**: Your registry encrypted password. + * **Domain**: Your registry address, `mydomain.com`. + +{% include + image.html + lightbox="true" + file="/images/integrations/docker-registries/add-other-docker-registry.png" + url="/images/integrations/docker-registries/add-other-docker-registry.png" + alt="Other Registry settings" + caption="Other Registry settings" + max-width="60%" %} + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + +## Heroku Registries + +To authenticate to the Heroku registry, instead of using your password, you will need to use the authorization token. 
You can find that by running: + +{% highlight bash %} +heroku auth:token +{% endhighlight %} + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) diff --git a/_docs/integrations/docker-registries/quay-io.md b/_docs/integrations/docker-registries/quay-io.md new file mode 100644 index 000000000..fb2912951 --- /dev/null +++ b/_docs/integrations/docker-registries/quay-io.md @@ -0,0 +1,43 @@ +--- +title: "Quay.io" +description: "Use Quay registries with pipeline integration" +group: integrations +sub_group: docker-registries +redirect_from: + - /docs/quayio/ + - /docs/docker-registries/external-docker-registries/quay-io/ +toc: true +--- + +Configure Quay as your Docker registry provider. + +## Set up Quay integration + + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Other Registries**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Username**: Your `Quay.io` username. + * **Password**: Your `Quay.io` encrypted password. + * **Domain**: `quay.io`. + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/add-quay-registry.png" + url="/images/integrations/docker-registries/add-quay-registry.png" + alt="Quay Docker registry settings" + caption="Quay Docker registry settings" + max-width="60%" %}` + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + +## Related articles +[Docker registries for pipeline integrations]({{site.baseurl}}/docs/integrations/docker-registries) +[Working with Docker Registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) +[Building and pushing an image]({{site.baseurl}}/docs/yaml-examples/examples/build-and-push-an-image/) \ No newline at end of file diff --git a/_docs/integrations/gcloud-builder.md b/_docs/integrations/gcloud-builder.md new file mode 100644 index 000000000..06064a175 --- /dev/null +++ b/_docs/integrations/gcloud-builder.md @@ -0,0 +1,312 @@ +--- +title: "Google Cloud Builder" +description: "Use the Google Cloud builder to create Docker images in Codefresh pipelines" +group: integrations + +toc: true +--- + +Google Cloud builder is an online service that allows you to build Docker images using the Google infrastructure and also push them to the Google Cloud registry. + +You can also use Cloud builder in a Codefresh pipeline in place of the [normal build step]({{site.baseurl}}/docs/pipelines/steps/build/). This way you can take advantage of the Cloud builder in your Codefresh pipelines, but still push to other registries that are connected to Codefresh (and not just GCR). + + +## Prerequisites + +To use the Cloud builder service in your Codefresh pipeline you need: + +1. A free Docker Hub account and [Docker Hub connected to Codefresh]({{site.baseurl}}/docs/integrations/docker-registries/docker-hub/). +1. 
A Google Cloud subscription and a [service account for the Cloud builder service](https://cloud.google.com/cloud-build/docs/securing-builds/set-service-account-permissions){:target="\_blank"}. + +Save your service account as a JSON file, and make sure you select at least the [following roles](https://cloud.google.com/container-registry/docs/access-control){:target="\_blank"}: + +* Cloud storage Admin +* Storage Admin +* Storage Object Viewer +* Storage Object Creator + +You will use this JSON file either by integrating a [Google Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/) in Codefresh, or directly in a pipeline as we will see later. + +## How it works + +The Google Cloud builder integration/authentication can be used in the following ways: + +1. Authentication is retrieved from the GCR integration in your Codefresh account, and the resulting Docker image: + * Is also be pushed to GCR. + * Is pushed to any other [external registry connected to Codefresh]({{site.baseurl}}/docs/integrations/docker-registries/). +1. Authentication is defined in the pipeline itself, and the resulting image can be pushed to any registry connected to Codefresh + +In the first case, you will define the service account file centrally in the GCR integration screen, and then any pipeline can authenticate to Google Cloud builder without any further configuration. + +{% + include image.html + lightbox="true" + file="/images/integrations/docker-registries/add-gcr-registry.png" + url="/images/integrations/docker-registries/add-gcr-registry.png" + alt="Using the JSON service account in Codefresh" + caption="Using the JSON service account in Codefresh" + max-width="50%" +%} + + + +## Using Google Cloud builder in a Codefresh pipeline + +In the most straightforward scenario, you want to create a Docker image with Google Cloud builder and also push to GCR. + +{% include image.html +lightbox="true" +file="/images/integrations/gcloud-builder/build-push-gcr.png" +url="/images/integrations/gcloud-builder/build-push-gcr.png" +max-width="90%" +caption="Using Google cloud builder in Codefresh" +alt="Using Google cloud builder in Codefresh" +%} + +Here is the full pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: slim + registry: gcr + dockerfile: Dockerfile.multistage + provider: + type: gcb + arguments: + cache: + repo: "my-kaniko-cache" + ttl: "10h" +{% endraw %} +{% endhighlight %} + + +The `build` step of the pipeline has an extra property, `provider`, that specifies we want to use Cloud builder instead of the Codefresh native build step. + +The only required argument is the repository to be used for [Kaniko caching](https://cloud.google.com/cloud-build/docs/kaniko-cache){:target="\_blank"} and speed up subsequent builds. + +>Note that the Kaniko repo should NOT be the same as the repository used for the image itself. 
+ +{% include image.html +lightbox="true" +file="/images/integrations/gcloud-builder/image-dashboard.png" +url="/images/integrations/gcloud-builder/image-dashboard.png" +max-width="70%" +caption="Inspecting an image from Google Cloud build" +alt="Inspecting an image from Google Cloud build" +%} + +After runing the pipeline, you will see your Docker image in the [Image dashboard]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/). + +The Docker image is also visible in the Google Cloud Console view of your registry. + +### Pushing to a different registry + +Even though the Cloud builder pipeline step authentication is fetched from the GCR configuration, you don't have to push to GCR. +To push the Docker image to another connected registry, simply change the `registry` property in the build step: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: slim + registry: azure + dockerfile: Dockerfile.multistage + provider: + type: gcb + arguments: + cache: + repo: "my-kaniko-cache" + ttl: "10h" +{% endraw %} +{% endhighlight %} + +This pipeline pushes the Docker image created to another registry that is identified by [azure]({{site.baseurl}}/docs/integrations/docker-registries/azure-docker-registry/). + +### Authenticating to Cloud Builder in the pipeline + +If you don't want to reuse the Registry integration provided by Codefresh for easy authentication to Google Cloud builder, you can also use your service account JSON file directly in the pipeline. + +You can pass the contents of the JSON file as a variable in the pipeline and the build step will use it to authenticate. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: slim + registry: azure + dockerfile: Dockerfile.multistage + provider: + type: gcb + arguments: + google_app_creds: '${{G_CREDS_B64}}' + cache: + repo: "my-kaniko-cache" + ttl: "10h" +{% endraw %} +{% endhighlight %} + +Here the pipeline will try to authenticate to Google Cloud builder using the contents of the `google_app_creds` property. + +The value of this property can be a pipeline variable, or project variable or any other standard Codefresh method such as [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + +You need to escape the contents of the service account before you use in the pipeline with either of these commands on your local workstation: + +* `cat _json_key_file | base64 | tr -d ‘\n’` +* `cat _json_key_file | base64 -w 0` + +### Using extra properties for Google Cloud builder + +The build step has several other properties can be used to fine-tune the Google Cloud builder behavior. 
+ +Here is the full syntax: + + +{% highlight yaml %} +{% raw %} + +step_name: + type: build + title: Step Title + description: Free text description + working_directory: ${{clone_step_name}} + dockerfile: path/to/Dockerfile + image_name: owner/new-image-name + tag: develop + build_arguments: + - key=value + target: stage1 + no_cache: false + no_cf_cache: false + fail_fast: false + registry: my-registry + provider: + type: gcb + arguments: + google_app_creds: '${{G_CREDS_B64}}' + cache: + repo: "repositoryname/kaniko-cache" + ttl: "10h" + timeout: "600s" + machineType: 'N1_HIGHCPU_8' + logsBucket: "gs://your-project_cloudbuild/logs" + diskSizeGb: 10 + +{% endraw %} +{% endhighlight %} + +The extra fields are: + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ------------------------------------------------------ | ------------------------- | +| `type` | Defines which provider to use (currently `gcb` and `cf` types are available). It uses `cf` provider by default and the whole provider section can be omitted for a regular build step. | Required | +| `arguments` | Parameters for Google Cloud builder | Required | +| `google_app_creds` | base64 encoded string of the [Google app credentials JSON](https://cloud.google.com/docs/authentication/production){:target="\_blank"}. By default, taken from the existing GCR integration. | Optional | +| `cache` | The list of Kaniko cache parameters | Required | +| `repo` | Docker repository path for the Kaniko cache | Required | +| `ttl` | Kaniko cache retention. Default value is `336h` | Optional | +| `timeout` | This field is directly translated into the corresponding field of the [GCB manifest file](https://cloud.google.com/cloud-build/docs/build-config#structure_of_a_build_config_file){:target="\_blank"}. 
Default is `10m` | Optional | +| `machineType` | This field is directly translated into the corresponding field of the [GCB manifest file](https://cloud.google.com/cloud-build/docs/build-config#structure_of_a_build_config_file){:target="\_blank"} | Optional | +| `diskSizeGb` | This field is directly translated into the corresponding field of the [GCB manifest file](https://cloud.google.com/cloud-build/docs/build-config#structure_of_a_build_config_file) {:target="\_blank"} | Optional | +| `logsBucket` | This field is directly translated into the corresponding field of the [GCB manifest file](https://cloud.google.com/cloud-build/docs/build-config#structure_of_a_build_config_file){:target="\_blank"} | Optional | + + + + +The step also accepts all the field of the [standard build step]({{site.baseurl}}/docs/pipelines/steps/build/) but notice that the following fields are not supported in the current implementation and simply ignored by the GCB step logic: + +* `no_cache` +* All the [buildkit]({{site.baseurl}}/docs/pipelines/steps/build/#buildkit-support) related fields + +Here is an example that uses all possible fields: + + `YAML` +{% highlight yaml %} +{% raw %} +GCBuild: + type: build + image_name: '${{IMAGE_NAME}}' + working_directory: ${{CloneStep}} + tag: your-tag1 + tags: + - your-tag2 + - your-tag3 + target: 'test' + no_cf_cache: false + metadata: + set: + - qa: pending + build_arguments: + - WORD=Hello + registry: 'reg-integration-name' + dockerfile: + content: |- + FROM alpine as test + RUN apk add skopeo + ARG WORD + RUN echo $WORD + provider: + type: gcb + arguments: + google_app_creds: '${{G_CREDS_B64}}' + cache: + repo: "repositoryname/kaniko-cache" + ttl: "10h" + timeout: "600s" + machineType: 'N1_HIGHCPU_8' + logsBucket: "gs://your-project_cloudbuild/logs" + diskSizeGb: 10 +{% endraw %} +{% endhighlight %} + + + + + + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Google Registry integration]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/) +[Build and push an image]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image/) + + + diff --git a/_docs/integrations/git-providers.md b/_docs/integrations/git-providers.md new file mode 100644 index 000000000..496a7d631 --- /dev/null +++ b/_docs/integrations/git-providers.md @@ -0,0 +1,400 @@ +--- +title: "Git Providers" +description: "Easily check out code in Codefresh CI pipelines" +group: integrations +redirect_from: + - /docs/git-provider/ + - /docs/integrations/ + - /docs/integrations/git-provider/ + - /docs/integrations/git-providers/integrating-codefresh-with-multiple-git-providers/ + - /docs/integrations/git-providers/configure-a-bitbucket-server-webhook/ +toc: true +--- +Creating an account with Codefresh using one of the supported Git providers (GitHub, GitLab, Bitbucket) gives you immediate access to the repositories of the linked provider. + +You can add repositories from the other Git providers regardless of the one that you used for sign-up. For example, you can use GitLab to sign up with Codefresh, but still build repositories that exist in Bitbucket. + +You can even add multiple accounts from each Git provider (if you have more than one) allowing you to use Codefresh as a central CI/CD solution that can access all your Git repositories regardless of the backing Git provider. 
+ +Currently Codefresh supports: + +* GitHub Cloud +* GitHub On-premises +* Bitbucket +* GitLab Cloud +* GitLab On-premises +* Azure DevOps Git +* Atlassian Stash (old version of Bibucket Server) +* Bitbucket Server (new version of Stash) + +Atlassian Stash/Bitbucket server as well as the on-premises version of GitLab and GitHub are only available to Codefresh enterprise customers. + +## Adding more Git providers to your Codefresh Account + +By default, you have direct access to Git repositories that exist in the Git provider that you used while signing up for Codefresh. You can easily create Codefresh projects that checkout code from that Git provider without any extra configurations. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Git** and then click **Configure**. +1. From the **Add Git Provider** drop-down, select the Git provider to add. +1. Define the settings as required. + + +{% include image.html lightbox="true" file="/images/integrations/codefresh-integrations.png" url="/images/integrations/codefresh-integrations.png" alt="Codefresh Account Integration" max-width="80%" %} + + +{% include image.html +lightbox="true" +file="/images/integrations/git/git-provider-menu.png" +url="/images/integrations/git/git-provider-menu.png" +max-width="60%" +caption="Add Git provider" +alt="Add Git provider" +%} + +For each Git provider you need to set up authentication, for Codefresh to get access to the public and private repositories of the respective provider. + +The easiest way to set up authentication is with OAuth2 if supported by the Git provider. You only need to name your integration +and Codefresh will automatically set it up once you accept the permissions required. If you have problems with OAuth2 +or the provider does not support it, you need to manually create credentials by yourself in your git account and then enter them into Codefresh. + +In the case of an on-premises Git provider you also need to fill in the URL where the provider is installed. + +## SSH Keys + +> Please contact support to enable this feature. + +You have the ability to specify whether you want to clone via HTTPS or SSH. + +1. Select the required Git integration, and click **Edit**. +1. Expand **Advanced Options** and toggle to **HTTPS** or **SSH**. +1. For SSH, paste your **raw**, private key into the SSH Key text box and click **Save**. 
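+
+If you do not yet have a key pair, you can generate one locally; this is only an illustrative example, and the key type, comment, and file path are up to you:
+
+```
+ssh-keygen -t ed25519 -C "codefresh" -f ~/.ssh/codefresh_git
+```
+
+Paste the contents of the private key file (here `~/.ssh/codefresh_git`) into Codefresh, and add the matching public key (`~/.ssh/codefresh_git.pub`) to your Git provider, as described in the provider documentation linked below.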
+ + +{% include image.html +lightbox="true" +file="/images/integrations/git/github-ssh.png" +url="/images/integrations/git/github-ssh.png" +max-width="40%" +caption="Git clone via SSH" +alt="Git clone via SSH" +%} + + +For more information on generating SSH keys and adding your public key to your VCS provider, see its official documentation: + +* [GitHub documentation](https://help.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent){:target="\_blank"} +* [GitLab documentation](https://docs.gitlab.com/ee/ssh/#generating-a-new-ssh-key-pair){:target="\_blank"} +* [Bitbucket documentation](https://confluence.atlassian.com/bitbucket/set-up-an-ssh-key-728138079.html){:target="\_blank"} +* [Azure documentation](https://docs.microsoft.com/en-us/azure/devops/repos/git/use-ssh-keys-to-authenticate?view=azure-devops&tabs=current-page){:target="\_blank"} + +## GitHub + +For the **OAuth2 method** you only need to decide on public/private repository access, enter a name for your connection and click *Save*. Then accept the permissions dialog. This is the easiest and recommended way to integrate GitHub. Notice that if +you used GitHub when you [created your Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/), this integration is already setup for you. + +For the **Access Token** method you need + +* A friendly name for the Git context (it can be anything you want) +* An access token + +>Note that the access token for an organization should be created by somebody who has **Owner** role and not just **Member** role. + +To create an [access token](https://github.com/settings/tokens){:target="\_blank"}, go to your GitHub *settings* and select the *Developer settings* option from the left +sidebar. Then select *Personal access tokens* from the left menu. +For more information see the [GitHub Documentation page](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/){:target="_blank"}. + +The "token description" you enter in your GitHub account in order to create the token is completely arbitrary (use "Codefresh" for an example). Once you have the token, paste it in the Codefresh UI and click *Test connection*. If everything is OK, you can +now save the Git integration. + +The minimum permissions for the token are: + +* `repo.*` +* `admin:repo_hook.*` + +{% include image.html +lightbox="true" +file="/images/integrations/git/github-required-scopes.png" +url="/images/integrations/git/github-required-scopes.png" +max-width="40%" +caption="GitHub permissions" +alt="GitHub permissions" +%} + +For GitHub on-premises you also need to provide the URL of the GitHub server in your organization. If enabled in your account you can setup [Pipeline definition restrictions]({{site.baseurl}}/docs/administration/access-control/#pipeline-definition-restrictions) by expanding the *YAML Options* segment. + +### Using External Secrets for GitHub Token + +If your GitHub installation is behind your firewall, you can also +use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/secret-storage/) (such as Kubernetes secrets) as values by entering a secrets value +with the same syntax [shown in pipelines]({{site.baseurl}}/docs/pipelines/secrets-store/). + +For example if you already have a `token` on a resource call `git-credentials` you can put in the token field the expression {% raw %}`${{secrets.git-credentials.token}}`{% endraw %}. 
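+
+As an illustration only, such a `git-credentials` resource could be backed by a Kubernetes secret similar to the one below, made available through your configured secret store; the namespace, secret name, and key are placeholders:
+
+```
+# illustrative only: the secret must be exposed via a Codefresh secret store integration
+kubectl create secret generic git-credentials \
+  --namespace codefresh \
+  --from-literal=token=<your-github-token>
+```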
+ +### Level of access + +When the admin clicks off "Allow access to all users" another toggle appears; “Allow these credentials to be shared within a pipeline for cloning a repository“ + +1. When its turned on, the user that runs a pipeline will be able to clone the repo. +2. When its turned off, the user that runs a pipeline cannot use this integration. +In both cases, the user cannot decrypt the token used in Git integration with CLI or API. + +>Important note: The credentials will be shared only to clone repos using an official git-clone step. + +## GitHub-App + +An alternative way to authenticate with Github is via the App mechanism. + +### Codefresh Github App + +> Note: The Codefresh App has READ permissions to issues, metadata, and pull requests, and READ and WRITE permissions to code, commit statuses, and repository hooks. If you need additional permission for your integration, use the Manual Creation steps. + +1. In the Codefresh UI, follow the steps to [add a new Git provider](#adding-more-git-providers-to-your-codefresh-account). +1. From the list of Git providers, select **Codefresh Github App**. +1. Select Setup GitHub App integration via [**GitHub Marketplace**](https://github.com/apps/codefresh-githubapp){:target=\_blank"}. +1. Follow the instructions on GitHub to install the application. + Once completed, the fields are automatically populated with the information. +1. To verify your integration, click **Test connection**. +1. To apply your changes, click **Save**. + +### Manual creation + +**Step 1** - Log in your GitHub account and visit [https://github.com/settings/apps](https://github.com/settings/apps){:target="\_blank"}. Click the *New GitHub App* button. + +**Step 2** - On the New app screen + +1. Give an arbitrary name to your app (e.g. codefresh-integration) +1. Fill *Homepage URL* with `http://www.codefresh.io` +1. Uncheck the *Active* checkbox under the Webhook section +1. In the *Repository permissions* section give the minimum of + * **Contents** - read + * **Issues** - read + * **Metadata** - read + * **Pull requests** - read + * **Webhooks** - read, write + * **Commit statuses** - read, write + * **Email addresses** - read +1. Click the *Create GitHub app* button. + +**Step 3** - In the next screen + +1. Note down the *App ID* number under the *About* section +1. Click the *Generate a private key* button and save the file locally + +**Step 4** - Click the *Install App* item from the left sidebar menu and then click the *Install* button next to your codefresh app + +**Step 5** - Accept the permissions and in the next screen define the repositories that you need Codefresh to access + +Also from the URL of the browser note the ending number (this is your installation id). For example if the URL is `https://github.com/settings/installations/10042353` then your installation number is 10042353 + +**Step 6** - Visit [https://g.codefresh.io/account-admin/account-conf/integration/git](https://g.codefresh.io/account-admin/account-conf/integration/git) in Codefresh, add a new Git provider and choose *Github App* from the drop-down menu + +For the required fields use: + +* **Installation id** - found in step 5 +* **App ID** - found in step 3 +* **Private key** - the contents of the file your created in step 3 (but convert it to base64 first) + +Click *Test connection* to verify your integration and apply your changes with the *Save* button. 
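+
+To produce the base64-encoded value of the private key, you can encode the `.pem` file you downloaded in step 3 on your workstation; the file name below is just an example:
+
+```
+cat codefresh-integration.private-key.pem | base64 | tr -d '\n'
+```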
If enabled in your account you can setup [Pipeline definition restrictions]({{site.baseurl}}/docs/administration/access-control/#pipeline-definition-restrictions) by expanding the *YAML Options* segment. + +## GitLab + +For the **OAuth2 method** you only need to enable private repository access, enter a name for your connection and click *Save*. Then accept the permissions dialog. This is the easiest and recommended way to integrate GitLab. Notice that if +you used GitLab when you [created your Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/), this integration is already setup for you. + +For the **Access Key** method you need: + +* A friendly name for the Git context (it can be anything you want.) +* An access token/key + +To create an access token, go to your GitLab *settings* and select the *Access tokens* options. +For more information see the [GitLab Documentation page](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html){:target="_blank"} + +The name you enter in order to create the token in the GitLab UI is completely arbitrary (use "Codefresh" for an example) + +Once you have the token, paste it in the Codefresh UI and click *Test connection*. If everything is OK can +now save the Git integration. + +For GitLab on-premises you also need to provide the URL of the GitLab server in your organization. If enabled in your account you can setup [Pipeline definition restrictions]({{site.baseurl}}/docs/administration/access-control/#pipeline-definition-restrictions) by expanding the *YAML Options* segment. + +### Using External Secrets for GitLab Token + +If your GitLab installation is behind your firewall, you can also +use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/secret-storage/) (such as Kubernetes secrets) as values by entering a secrets value +with the same syntax [shown in pipelines]({{site.baseurl}}/docs/pipelines/secrets-store/). + +For example if you already have a `token` on a resource call `git-credentials` you can put in the token field the expression {% raw %}`${{secrets.git-credentials@token}}`{% endraw %}. + +## Bitbucket + +For the **OAuth2 method** you only need to enter a name for your connection and click *Save*. Then accept the permissions dialog. This is the easiest and recommended way to integrate Bitbucket. Notice that if +you used Bitbucket when you [created your Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/), this integration is already setup for you. + +For the **Application Password** method you need: + +* A friendly name for the Git context (It can be anything you want.) +* The name of your Bitbucket account/email address +* A Bitbucket application password + +To create an application password, go to your *Bitbucket settings* and select *App passwords* from the sidebar. +Click the button to create one. For more information see the [Bitbucket Documentation page](https://confluence.atlassian.com/bitbucket/app-passwords-828781300.html){:target="_blank"}. + +The minimum permissions needed by Codefresh are shown below. + +{% include image.html +lightbox="true" +file="/images/integrations/git/bitbucket-permissions.png" +url="/images/integrations/git/bitbucket-permissions.png" +max-width="40%" +caption="Bitbucket permissions" +alt="Bitbucket permissions" +%} + +The "label" you enter in your Bitbucket account in order to create the application password is completely arbitrary (use "Codefresh" for an example). 
Once you have the token, paste it in the Codefresh UI and click *Test connection*. If everything is OK, you can
now save the Git integration.
+
+ If enabled in your account, you can set up [Pipeline definition restrictions]({{site.baseurl}}/docs/administration/access-control/#pipeline-definition-restrictions) by expanding the *YAML Options* segment.
+
+## Azure DevOps
+
+For Azure you need to create a [personal access token](https://docs.microsoft.com/en-us/azure/devops/integrate/get-started/authentication/pats?view=azure-devops){:target="\_blank"}. Sign in to your Azure DevOps account and click your profile icon in the top right corner. Then select *Security*:
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/git/azure-devops-security.png"
+url="/images/integrations/git/azure-devops-security.png"
+max-width="60%"
+caption="Azure DevOps Security"
+alt="Azure DevOps Security"
+%}
+
+On the screen that appears, click the *New token* button. Enter an arbitrary name for the token and select the correct
+**Organization** from the drop-down menu. Remember your organization name, as you will use it later on the Codefresh side.
+Select an expiration date for your token.
+
+> At the time of writing, Azure DevOps does not have the option to create a token that is valid forever. Choose a large
+time period and make sure that you have a policy in place for renewing your tokens, so that Codefresh can continue to read your Git repo.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/git/azure-devops-token.png"
+url="/images/integrations/git/azure-devops-token.png"
+max-width="60%"
+caption="Azure DevOps Token"
+alt="Azure DevOps Token"
+%}
+
+From the *Scope* section choose the option *Show all scopes* and select the following:
+
+* Code - read
+* Code - status
+* Graph - read
+* Project and Team - read
+* User profile - read
+
+Finally, click the *Create* button and copy your token (it will never be shown again).
+
+Then, in the Codefresh configuration, enter your organization name and your token.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/git/azure-devops-verify.png"
+url="/images/integrations/git/azure-devops-verify.png"
+max-width="40%"
+caption="Codefresh integration with Azure DevOps"
+alt="Codefresh integration with Azure DevOps"
+%}
+
+Click *Test connection* to verify your settings and finally click *Save*. Now you can [create pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+that use Azure DevOps Git repos.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/git/azure-devops-connected.png"
+url="/images/integrations/git/azure-devops-connected.png"
+max-width="40%"
+caption="Codefresh integration with Azure DevOps"
+alt="Codefresh integration with Azure DevOps"
+%}
+
+Your Azure DevOps repositories will be available when [creating a new project in Codefresh]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/).
+
+ If enabled in your account, you can set up [Pipeline definition restrictions]({{site.baseurl}}/docs/administration/access-control/#pipeline-definition-restrictions) by expanding the *YAML Options* segment.
+
+## Atlassian Stash
+
+Atlassian Stash is only available for an on-premises connection. Follow the same instructions as for Bitbucket.
+You also need to provide the URL of the Stash server in your organization.
+
+This option applies only to Atlassian Stash up to version 3.10, which is the old version of the product. It was later renamed
+to Bitbucket Server. 
+
+## Bitbucket Server
+
+Bitbucket Server is the new, current name of Atlassian Stash. Again, it is only available for an on-premises
+installation.
+
+Codefresh supports Bitbucket Server versions 5.4.0 and later, since those expose the API used by the integration.
+
+### Using External Secrets for Bitbucket Token
+
+If your Bitbucket Server installation is behind your firewall, you can also
+use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/secret-storage/) (such as Kubernetes secrets) as values, by entering a secret value
+with the same syntax [shown in pipelines]({{site.baseurl}}/docs/pipelines/secrets-store/).
+
+For example, if you already have a `token` on a resource called `git-credentials`, you can put in the token field the expression {% raw %}`${{secrets.git-credentials@token}}`{% endraw %}.
+
+## Using your Git provider
+
+Once your provider is active, you can add a new project into Codefresh, and the [repository selection screen]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/) will give you access to the additional Git providers.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/git/select-git.png"
+url="/images/integrations/git/select-git.png"
+max-width="60%"
+caption="Select Git provider"
+alt="Select Git provider"
+%}
+
+>Notice that for all supported Git providers, Codefresh automatically creates all the webhooks needed for
+triggering pipelines when a commit (or another event) happens.
+
+After adding the repository, Codefresh behaves exactly the same, regardless of the selected Git provider.
+You will be able to [create pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) for different Git providers in exactly the same manner (see the git-clone sketch at the end of this article).
+
+## ABAC for Git Contexts
+
+**Account Level:** Pro and above
+
+> At this time, you will need to reach out to support to enable ABAC for Git Context and [Pipeline Execution Context]({{site.baseurl}}/docs/administration/pipeline-execution-context/).
+
+ABAC for Git Context gives you the ability to restrict the use and handling of Git Contexts. We use tags on the Git Context to limit Teams and Execution Contexts for access control. There are four actions controlled by ABAC: Creating, Updating, Deleting, and Using Git Contexts.
+
+The Using action covers the following use cases:
+
+* Creating a trigger
+* Getting YAML from a repository
+* Using the Git Context in a pipeline (git-clone step etc.) via an Execution Context
+
+You will get a Permission Denied or Forbidden error when you attempt an action on a Git Context for which you do not have the required permissions.
+
+### Tagging the Git Context
+
+1. Navigate to Account Settings > Integrations > Configure for Git.
+1. Hover over the integration name (Git Context); you will see "Edit Tags" just before the edit symbol.
+1. Select "Edit Tags" to add and remove tags.
+1. Click Save when done.
+
+### Setting the Permissions
+
+1. Navigate to Account Settings > Permissions > Teams or Execution Context.
+1. Scroll to Git Contexts.
+1. Here, you can set [permissions]({{site.baseurl}}/docs/administration/access-control/#creating-a-security-policy) similar to other ABAC rules for Teams or Execution Contexts for the Create or Use, Update, and Delete actions.
+1. Click Add Rule when done. 
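+
+For reference, here is a minimal sketch of how a pipeline consumes any of the Git integrations on this page through the official git-clone step. The repository, branch, and integration names below are placeholders; replace them with your own values and see the [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) documentation for the full syntax.
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  clone_step:
+    title: Cloning the repository
+    type: git-clone
+    repo: my-org/my-repo          # placeholder repository
+    revision: '${{CF_BRANCH}}'    # branch or revision to check out
+    git: my-git-integration       # name of the Git integration (Git context) created above
+{% endraw %}
+{% endhighlight %}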
+
+## Related articles
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+[Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/)
+[Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/)
+[Checking out source code]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/)
+
diff --git a/_docs/integrations/github-actions.md b/_docs/integrations/github-actions.md
new file mode 100644
index 000000000..fce8587be
--- /dev/null
+++ b/_docs/integrations/github-actions.md
@@ -0,0 +1,161 @@
+---
+title: "GitHub Actions pipeline integration"
+description: "Using the GitHub action converter in Codefresh pipelines"
+group: integrations
+
+toc: true
+---
+
+[GitHub Actions](https://github.com/features/actions){:target="\_blank"} are a set of reusable workflows that can be composed to create automation sequences for GitHub projects. GitHub Actions are supported natively by GitHub, but you can also use them in Codefresh pipelines by automatically converting them to [Codefresh pipeline steps]({{site.baseurl}}/docs/pipelines/steps/).
+
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/github-actions/github-actions-marketplace.png"
+url="/images/integrations/github-actions/github-actions-marketplace.png"
+max-width="60%"
+caption="GitHub Actions Marketplace"
+alt="GitHub Actions Marketplace"
+%}
+
+By using GitHub Actions in your Codefresh pipelines, you get the following benefits:
+* Access to a vast catalog of reusable pipeline components
+* Ability to use GitHub Actions with any Git provider, as Codefresh supports [Bitbucket, GitLab, Azure Git etc.]({{site.baseurl}}/docs/integrations/git-providers/)
+
+
+## Prerequisites
+
+To use a GitHub Action in Codefresh, you need to make sure that the following apply:
+
+1. The [GitHub action](https://github.com/marketplace?type=actions){:target="\_blank"} you have selected is Docker-based and has a self-contained and valid Dockerfile
+1. You have read the documentation of the GitHub Action and know what arguments/input it requires
+
+
+>Tip:
+  Since GitHub Actions are created by the community, it is your responsibility to filter and curate any GitHub Action you wish to use in Codefresh pipelines. If, for example, you use a GitHub Action that is then removed by its owner, the Codefresh pipeline that uses it will break as well.
+  We suggest you first use a GitHub Action in a GitHub workflow in order to understand its requirements, before you use it in a Codefresh pipeline.
+
+## How it works
+
+Codefresh offers a `github-action-to-codefresh` step converter.
+This converter has two functions:
+
+1. When you create your pipeline, it analyzes the GitHub Action and finds the arguments it requires.
+  You must then define the values for these arguments yourself.
+1. When the pipeline runs, it automatically finds the Dockerfile of the GitHub Action, builds it, and makes the resulting Docker image available to any subsequent step in the same pipeline.
+
+This entire process is automatic. You just need to make sure that all arguments/inputs of the GitHub Action are provided using [pipeline variables]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines), [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/), or any other standard mechanism you already use in Codefresh.
+
+## Inserting a GitHub Action in a Codefresh pipeline
+
+1. [Create a Codefresh pipeline]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines) by visiting the pipeline editor.
+1. 
In the **Steps** tab on the right-hand side, search for `actions` and select **GitHub Actions**. + +{% include image.html +lightbox="true" +file="/images/integrations/github-actions/github-action-step-browser.png" +url="/images/integrations/github-actions/github-action-step-browser.png" +max-width="50%" +caption="Step browser" +alt="Step browser" +%} + +{:start="3"} +1. Scroll down to find the GitHub Action that you want to use or enter a keyword to filter the list. + +{% include image.html +lightbox="true" +file="/images/integrations/github-actions/select-github-action.png" +url="/images/integrations/github-actions/select-github-action.png" +max-width="70%" +caption="Select GitHub action" +alt="Select GitHub action" +%} + +{:start="4"} +1. Click on the GitHub Action you want to use in your pipeline. + Codefresh displays the [YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) for the action on the right. This is the step that you need to insert in your pipeline. + +{% include image.html +lightbox="true" +file="/images/integrations/github-actions/snyk-action-arguments.png" +url="/images/integrations/github-actions/snyk-action-arguments.png" +max-width="70%" +caption="Using the Snyk GitHub action" +alt="Using the Snyk GitHub action" +%} + + The YAML snippet is a template, and you need to fill the `env` block below the `arguments` block. + The required environment variables are specific to each GitHub Action, so check the documentation of the action itself. + + + In the example above, we use the Snyk GitHub Action. By visiting the [documentation page](https://github.com/marketplace/actions/snyk-cli-action){:target="\_blank"}, we find that this action expects a `SNYK_TOKEN` as input. + + We therefore add the token as a pipeline variable: + +{% include image.html +lightbox="true" +file="/images/integrations/github-actions/environment-variables.png" +url="/images/integrations/github-actions/environment-variables.png" +max-width="60%" +caption="Pipeline variables" +alt="Pipeline variables" +%} + +Here is the final pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + snyk-cli-action: + title: snyk + description: snyk + type: github-action-executor + arguments: + url: 'https://github.com/marketplace/actions/snyk-cli-action' + envs: + - SNYK_TOKEN: '${{SNYK_TOKEN}}' + cmd: test alpine@latest +{% endraw %} +{% endhighlight %} + +The `cmd` property is specific to each GitHub action and in the case of Snyk we say we want to scan an alpine image for security issues. + + + +## Running a Codefresh pipeline with GitHub Actions + + +You can run the pipeline as any other Codefresh pipeline. + +{% include image.html +lightbox="true" +file="/images/integrations/github-actions/github-action-pipeline.png" +url="/images/integrations/github-actions/github-action-pipeline.png" +max-width="80%" +caption="Using GitHub Actions in a Codefresh pipeline" +alt="Using GitHub Actions in a Codefresh pipeline" +%} + + +Once the pipeline reaches the GitHub Action step, the converter automatically does the following: + +1. Finds the Dockerfile of the GitHub Action +1. Builds the Dockerfile +1. Takes the resulting image and inserts it as Codefresh step +1. Passes the environment variables as arguments to the GitHub Action +1. Runs the `cmd` command + +If you have issues, please contact us or open a support ticket, and let us know which GitHub Action you are trying to use and the URL of the Codefresh build that fails. 
+
+
+## Related articles
+[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/)
+[Plugin marketplace](https://codefresh.io/steps/){:target="\_blank"}
+
+
+
diff --git a/_docs/integrations/google-cloud.md b/_docs/integrations/google-cloud.md
new file mode 100644
index 000000000..89b673ea9
--- /dev/null
+++ b/_docs/integrations/google-cloud.md
@@ -0,0 +1,122 @@
+---
+title: "Google Cloud pipeline integration"
+description: "Use Google Cloud with Codefresh pipelines"
+group: integrations
+redirect_from:
+  - /docs/deploy-your-containers/kubernetes/
+toc: true
+---
+
+Codefresh has native support for Google Cloud in the following areas:
+
+- [Connecting to Google registries]({{site.baseurl}}/docs/docker-registries/google-container-registry/)
+- [Deploying to GKE]({{site.baseurl}}/docs/integrations/kubernetes/#adding-gke-cluster)
+- [Using Google Storage for test reports]({{site.baseurl}}/docs/testing/test-reports/#connecting-a-google-bucket)
+- [Using Google Storage for Helm charts]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/#private-repository---gcs)
+- [Using Cloud Build]({{site.baseurl}}/docs/integrations/gcloud-builder/)
+- [Installing the Runner via the Marketplace]({{site.baseurl}}/docs/integrations/google-marketplace/)
+
+
+## Using Google Container Registries
+
+Google Container registries are fully compliant with the Docker registry API that Codefresh follows. You can connect GCR like any [other Docker registry]({{site.baseurl}}/docs/docker-registries/google-container-registry/).
+
+{%
+  include image.html
+  lightbox="true"
+file="/images/integrations/docker-registries/add-gcr-registry.png"
+url="/images/integrations/docker-registries/add-gcr-registry.png"
+alt="Connecting to GCR"
+caption="Connecting to GCR"
+max-width="70%"
+%}
+
+Once the registry is added, you can use the [standard push step]({{site.baseurl}}/docs/pipelines/steps/push/) in pipelines. See also the documentation page for [working with Docker registries]({{site.baseurl}}/docs/integrations/docker-registries/).
+
+## Deploying to Google Kubernetes Engine
+
+Codefresh has native support for connecting a GKE cluster in the [cluster configuration screen]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster).
+
+{%
+  include image.html
+  lightbox="true"
+file="/images/integrations/google-cloud/gke-integration.png"
+url="/images/integrations/google-cloud/gke-integration.png"
+alt="Connecting a GKE cluster"
+caption="Connecting a GKE cluster"
+max-width="40%"
+%}
+
+Once the cluster is connected, you can use any of the [available deployment options]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) for Kubernetes clusters.
+You also get access to all other Kubernetes dashboards, such as the [cluster dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) or the [environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/).
+
+## Storing test reports in Google Cloud storage
+
+Codefresh has native support for test reports. You can store the reports on Google Cloud storage.
+
+{% include
+image.html
+lightbox="true"
+file="/images/integrations/google-cloud/google-cloud-storage.png"
+url="/images/integrations/google-cloud/google-cloud-storage.png"
+alt="Google cloud storage"
+caption="Google cloud storage"
+max-width="50%"
+%}
+
+See the full documentation for [test reports]({{site.baseurl}}/docs/testing/test-reports/). 
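+
+For orientation, the test-report upload step in a pipeline typically looks like the sketch below. The bucket name and the storage integration name are placeholders, and the exact environment variables are described in the [test reports]({{site.baseurl}}/docs/testing/test-reports/) documentation:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+  unit_test_reporting_step:
+    title: Upload test report
+    image: codefresh/cf-docker-test-reporting
+    working_directory: '${{CF_VOLUME_PATH}}/'
+    environment:
+      - REPORT_DIR=coverage                    # folder that contains the generated report
+      - REPORT_INDEX_FILE=index.html           # entry point of the report
+      - BUCKET_NAME=my-test-report-bucket      # placeholder bucket name
+      - CF_STORAGE_INTEGRATION=google          # name you gave to the Google Cloud storage integration
+{% endraw %}
+{% endhighlight %}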
+
+## Using Google Storage for storing Helm charts
+
+You can connect Google Storage as a Helm repository by setting up a [Helm integration]({{site.baseurl}}/docs/integrations/helm/#google-cloud-storage-gcs-helm-repository-settings) in Codefresh.
+
+{% include
+image.html
+lightbox="true"
+file="/images/integrations/google-cloud/google-storage-helm-repo.png"
+url="/images/integrations/google-cloud/google-storage-helm-repo.png"
+alt="Using Google Cloud for Helm charts"
+caption="Using Google Cloud for Helm charts"
+max-width="60%"
+%}
+
+Once you connect your Helm repository, you can use it in any [Codefresh pipeline with the Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/).
+
+## Using Google Cloud Build
+
+Codefresh has a [native Docker build step]({{site.baseurl}}/docs/pipelines/steps/build/) for creating Docker images. As an alternative method of building Docker images, you can also use [Google Cloud Build]({{site.baseurl}}/docs/integrations/gcloud-builder/) in a Codefresh pipeline.
+
+## Installing the Codefresh runner from the Google Marketplace
+
+The [Codefresh runner]({{site.baseurl}}/docs/installation/codefresh-runner/) is a Kubernetes-native application that allows you to run pipelines on your own Kubernetes cluster (even behind the firewall). Specifically for Google Cloud, the runner is also available via the [marketplace]({{site.baseurl}}/docs/integrations/google-marketplace/){:target="\_blank"}.
+
+
+## Traditional Google Cloud deployments
+
+For any other Google Cloud deployment, you can use the [Google Cloud CLI from a Docker image](https://hub.docker.com/r/google/cloud-sdk/){:target="\_blank"} in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/).
+
+`YAML`
+{% highlight yaml %}
+{% raw %}
+  create_a_vm:
+    title: "Creating a Virtual machine"
+    type: "freestyle"
+    arguments:
+      image: "google/cloud-sdk:slim"
+      commands:
+        - echo $KEY_FILE | base64 --decode > key_file.json
+        - gcloud compute instances create demo-codefresh --image codefresh-simple-ubuntu-vm --zone europe-west1-b --metadata-from-file startup-script=startup.sh --tags http-server --preemptible --quiet
+{% endraw %}
+{% endhighlight %}
+
+See the example of [uploading to a Google Bucket]({{site.baseurl}}/docs/example-catalog/ci-examples/uploading-or-downloading-from-gs/) or [creating a VM]({{site.baseurl}}/docs/example-catalog/ci-examples/packer-gcloud/) for more details.
+
+
+
+## Related articles
+[Add your cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster)
+[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/)
+[Cloning Git repositories]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/)
+
diff --git a/_docs/integrations/google-marketplace.md b/_docs/integrations/google-marketplace.md
new file mode 100644
index 000000000..e02b36aa1
--- /dev/null
+++ b/_docs/integrations/google-marketplace.md
@@ -0,0 +1,249 @@
+---
+title: "Google Marketplace integration"
+description: "Learn how to run Codefresh pipelines inside your GKE cluster"
+group: integrations
+toc: true
+---
+
+Codefresh has partnered with [Google Cloud](https://cloud.google.com/){:target="\_blank"} and allows you to install a Codefresh pipeline builder within your own Kubernetes cluster.
+The integration is available in the Google Marketplace for Kubernetes apps at [https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh](https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh){:target="\_blank"}. 
+ +Once you configure Codefresh to use your own Kubernetes cluster for builds, you can enjoy all benefits of a **hybrid** installation as the Codefresh UI and management dashboards will still run in a SAAS manner, while the actual builds and pipelines will execute in your own cluster. + +The major benefits are the following: + + * You define exactly what resources are used for your builds instead of relying on Codefresh infrastructure. + * The management UI still runs in the Codefresh premises and is managed by the Codefresh team allowing you to focus on your builds. + * The Codefresh builder has access to all private resources that run in your cluster so it is very easy to use resources that should not be exposed to the Internet for any reason. + * Unified billing. You pay a single bill to Google that includes the price for your Kubernetes cluster as well as the Codefresh pipelines. + + +To start the integration, you need the following: + +1. A [Google Cloud account](https://cloud.google.com/){:target="\_blank"} with billing enabled +1. A [GKE cluster](https://cloud.google.com/kubernetes-engine/docs/quickstart){:target="\_blank"} that will run all builds and pipelines +1. A [Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) (creating an account is free, you pay only for builds) + +Then visit the Codefresh GKE page at [https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh](https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh){:target="\_blank"}. + +## Using Codefresh from the Google Marketplace + +When you configure Codefresh integration from the Google Marketplace, a special Codefresh runner [is installed](https://github.com/codefresh-io/google-marketplace-integration){:target="\_blank"} in your own cluster. + +{% include image.html +lightbox="true" +file="/images/integrations/google-marketplace/architecture.png" +url="/images/integrations/google-marketplace/architecture.png" +max-width="80%" +caption="Google Marketplace integration" +alt="Google Marketplace integration" +%} + +The Codefresh UI is still hosted by Codefresh in a SAAS manner. The builds themselves however +run inside your own cluster. + +The builder is responsible for executing all your builds and notifying the Codefresh UI of their status. You can also access internal cluster resources that are normally not accessible to the SAAS hosted Codefresh builders. + +You can still run builds in the Codefresh SAAS infrastructure if you wish, and therefore both approaches are valid at the same time. + +## Usage and billing + +To start using the service, you need to [enable billing](https://cloud.google.com/billing/docs/how-to/modify-project){:target="\_blank"} in your Google Cloud account. Once that is done, Codefresh billing is integrated into your Google invoices. + +You will pay for the cluster resources to Google, plus the Codefresh builds. Codefresh does not collect any payment from you directly. Google Cloud will invoice you for both the cluster infrastructure and the cluster usage. + +Current pricing for Codefresh builds is always shown in the [marketplace page](https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh){:target="\_blank"}. + +## Install the Google Marketplace + +### Step 1: Create a Codefresh API key + +1. Log in to your Codefresh account, and from your avatar dropdown, select [**User Settings**](https://g.codefresh.io/user/settings){:target="\_blank"}. +1. Scroll down to **API Keys**. +1. 
To create a new API key, click **Generate**, and do the following:
+  * **Key Name**: Enter a name for the key, preferably one that will help you remember its purpose. The token is tied to your Codefresh account and should be considered sensitive information.
+  * **Scopes**: Select _all_ the scopes.
+1. Copy the token to your clipboard.
+1. Click **Create**.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/generate-token.png"
+url="/images/integrations/google-marketplace/generate-token.png"
+max-width="40%"
+caption="Generating a Codefresh API token"
+alt="Generating a Codefresh API token"
+%}
+
+
+With the token at hand, we can go to the Google Marketplace.
+
+### Step 2: Install the Codefresh application in your Google Cloud cluster
+
+1. Navigate to [https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh](https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh){:target="\_blank"}.
+1. From the dropdown menu at the top of the page, select a Google project that has billing enabled.
+1. Click **Configure**.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/configure-plan.png"
+url="/images/integrations/google-marketplace/configure-plan.png"
+max-width="50%"
+caption="Installing the Codefresh application"
+alt="Installing the Codefresh application"
+%}
+
+{:start="4"}
+1. Define the general settings for the installation.
+  These include:
+  * The cluster that will be used for the installation
+  * An existing or new namespace where the Codefresh builder will reside
+  * A name for your installation (arbitrary choice)
+  * The Codefresh API token that you created in the previous section
+  * The account that will be used for cluster management
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/settings.png"
+url="/images/integrations/google-marketplace/settings.png"
+max-width="50%"
+caption="Codefresh installation settings"
+alt="Codefresh installation settings"
+%}
+
+{:start="5"}
+1. Note down the namespace you used, as it becomes important later on inside the Codefresh UI.
+1. After defining all the settings, click **Deploy** to install. Wait a few minutes for the installation to complete.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/deploying.png"
+url="/images/integrations/google-marketplace/deploying.png"
+max-width="50%"
+caption="Deploying the Codefresh application"
+alt="Deploying the Codefresh application"
+%}
+
+The Codefresh application is now installed on your cluster.
+
+### Step 3: Set up communication with Codefresh SaaS
+
+To finish the installation, we need to make Codefresh SaaS aware of the new builder.
+
+1. On the right-hand side, copy the full command to complete the installation.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/run-command.png"
+url="/images/integrations/google-marketplace/run-command.png"
+max-width="60%"
+caption="Endpoint command"
+alt="Endpoint command"
+%}
+
+This command must be executed from a shell that has `kubectl` installed with the correct configuration for the cluster that was used for the installation. If you already have a local shell that points to your cluster, feel free to paste the command there and run it.
+
+The easiest way to run it in any other case is via the [Google shell](https://cloud.google.com/shell/docs/){:target="\_blank"}. 
Click the *Activate Google shell* icon at the top right and wait until the shell appears at the bottom of the window.
+
+First you need to set up `kubectl` access. Run:
+
+`Google shell`
+{% highlight shell %}
+{% raw %}
+gcloud container clusters list
+{% endraw %}
+{% endhighlight %}
+
+This will show you a list of your clusters. Find the one that has the Codefresh application and run:
+
+`Google shell`
+{% highlight shell %}
+{% raw %}
+gcloud container clusters get-credentials [my-cluster-name] --zone=[my-cluster-zone]
+{% endraw %}
+{% endhighlight %}
+
+This will set up `kubectl` access. You can try running a few commands, such as `kubectl get nodes` and `kubectl cluster-info`, to verify that cluster communication is set up correctly.
+
+Then run the full command. Here is an example:
+
+`Google shell`
+{% highlight shell %}
+{% raw %}
+$ APP="codefresh-kostis" NS="kostisdemo" ENDPOINT="$(kubectl cluster-info | head -1 | cut -d' ' -f6 | sed 's/\x1b[[0-9;]*m//g' | tr -d '\n' | base64)" && kubectl -n $NS get secret $APP-secret -o yaml | sed -r "s/(kubeEndpoint: ).*$/\1$ENDPOINT/" | kubectl apply -f - && kubectl -n $NS delete pod -l app.kubernetes.io/name=$APP
+
+secret "codefresh-kostis-secret" configured
+pod "codefresh-kostis-kube-agent-86dbcc67c4-9gqqb" deleted
+{% endraw %}
+{% endhighlight %}
+
+
+Once the command has run, you can visit the [Codefresh Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) and you will see your Google Cloud cluster already configured.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/cluster-details.png"
+url="/images/integrations/google-marketplace/cluster-details.png"
+max-width="60%"
+caption="Codefresh Kubernetes dashboard"
+alt="Codefresh Kubernetes dashboard"
+%}
+
+The full integration is now ready, and you can start running Codefresh pipelines in your own cluster.
+
+### Step 4: Start running pipelines
+
+Now, whenever you set up a [Codefresh pipeline]({{site.baseurl}}/docs/pipelines/pipelines/), you can choose its execution environment and point it to your own cluster with the Codefresh builder.
+
+>At this point, if you have a Codefresh browser window open, make sure that you log out and then log in again, so that the new UI options regarding your cluster become available.
+
+Open any Codefresh pipeline and toggle the *Run on Environment* switch. Select the cluster and the namespace that you used for the installation in step 2.
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/google-marketplace/run-in-environment.png"
+url="/images/integrations/google-marketplace/run-in-environment.png"
+max-width="60%"
+caption="Running Pipelines in your cluster"
+alt="Running Pipelines in your cluster"
+%}
+
+
+You can still use the Codefresh SaaS infrastructure if you don't enable this switch. You can choose which pipelines
+run in Codefresh SaaS and which use your cluster, depending on your needs.
+
+
+## Alternative installation from the command line
+
+Instead of installing via the Google Cloud console, you can also install the Codefresh application using command line procedures.
+
+For this installation mode, see the [manual installation guide](https://github.com/codefresh-io/google-marketplace-integration/blob/master/README.md){:target="\_blank"}.
+
+## Removing the installation
+
+If you want to remove the Codefresh builder from your cluster, navigate to the "Applications" page in the Google Cloud console and click the Delete button. 
+ +{% include image.html +lightbox="true" +file="/images/integrations/google-marketplace/remove.png" +url="/images/integrations/google-marketplace/remove.png" +max-width="60%" +caption="Removing the Codefresh application" +alt="Removing the Codefresh application" +%} + +You can install the Codefresh builder again from the [marketplace](https://console.cloud.google.com/marketplace/details/codefresh-gke/codefresh){:target="\_blank"}. + +## Related articles +[Manage your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) + + + + + + + + diff --git a/_docs/integrations/hashicorp-vault.md b/_docs/integrations/hashicorp-vault.md new file mode 100644 index 000000000..24301ecd6 --- /dev/null +++ b/_docs/integrations/hashicorp-vault.md @@ -0,0 +1,77 @@ +--- +title: "HashiCorp Vault" +description: "Use secrets from Vault in Codefresh pipelines" +group: integrations +toc: true +--- + +Codefresh can use secrets from your HashiCorp Vault installation. This way you have full control over secret storage and rotation. + +>This feature is for Enterprise accounts only. + +## Prerequisites + +* Up and running Vault instance + Codefresh supports HashiCorp Cloud Platform (HCP) Vault and self-managed Vault instances that run on the cloud, as well as behind the firewall (albeit with some differences in the authentication methods). + +* [Authentication method](https://www.vaultproject.io/docs/auth){:target="\_blank"} to use. + Codefresh supports the following methods: + +{: .table .table-bordered .table-hover} +| Method | Notes | +|---|--- | +| [Username/Password](https://www.vaultproject.io/docs/auth/userpass){:target="\_blank"}|Available for SaaS and Hybrid versions | +| [Access Token](https://www.vaultproject.io/docs/auth/token){:target="\_blank"}|Available for SaaS and Hybrid versions | +| [Kubernetes](https://www.vaultproject.io/docs/auth/kubernetes){:target="\_blank"}|Only available with [Codefresh Runner installation]({{site.baseurl}}/docs/reference/behind-the-firewall/) | +| [Google Cloud Engine](https://www.vaultproject.io/docs/auth/gcp){:target="\_blank"}|Only available with [Codefresh Runner installation]({{site.baseurl}}/docs/reference/behind-the-firewall/) | +| [App Role](https://www.vaultproject.io/docs/auth/approle){:target="\_blank"}|Available for SaaS and Hybrid versions | + +## Set up HashiCorp Vault integration in the Codefresh UI + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Secret Store** and then click **Configure**. +1. From the **Add Provider** dropdown, select **Hashicorp vault**. +1. Do the following: + * **Name**: A unique name for the integration which is referenced in `codefresh.yaml`. + * If your Vault instance is behind a firewall, toggle **Vault is behind a firewall** to ON. + * To allow only Codefresh admins to change the Vault configuration, toggle **Allow access to all users** to OFF. + > The other settings are specific to your [Vault authentication](https://www.vaultproject.io/docs/auth){:target="\_blank"} method. Refer to the Vault documentation on how to get the required values. 
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/hashicorp-vault/hashicorp-vault.png"
+url="/images/integrations/hashicorp-vault/hashicorp-vault.png"
+alt="HashiCorp Vault secret store"
+caption="HashiCorp Vault secret store"
+max-width="80%"
+ %}
+
+{:start="5"}
+1. To apply the changes, click **Save**.
+
+
+## Set up HashiCorp Vault integration via Codefresh CLI
+
+You can also create Vault integrations with the [CLI](https://codefresh-io.github.io/cli/){:target="\_blank"}.
+
+Use the [create context command](https://codefresh-io.github.io/cli/contexts/create-context/create-secret-store-context/hashicorp-vault/){:target="\_blank"}.
+
+The options available are identical to the UI settings.
+For example, to create an integration with username/password authentication, you would run a command similar to the following (the values in angle brackets are placeholders for your own settings):
+
+`codefresh create context secret-store hashicorp-vault --sharing-policy AccountAdmins --app-url <vault-server-url> --username <username> --password <password>`
+
+
+
+## Using the HashiCorp Vault secret
+
+To use the Vault secrets in pipelines, see our [secrets guide]({{site.baseurl}}/docs/pipelines/secrets-store/).
+Because a secret in Vault can contain multiple key-value pairs, you also need to specify the key name, according to the syntax {% raw %}`${{secrets.vault-store-name.path/to/secret@key}}`{% endraw %}.
+
+## Related articles
+[Shared Configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/)
+[Git integration for pipelines]({{site.baseurl}}/docs/integrations/git-providers/)
+[Kubernetes integration for pipelines]({{site.baseurl}}/docs/integrations/kubernetes/)
+[Container registry integration for pipelines]({{site.baseurl}}/docs/integrations/docker-registries/)
diff --git a/_docs/integrations/helm.md b/_docs/integrations/helm.md
new file mode 100644
index 000000000..96700f20b
--- /dev/null
+++ b/_docs/integrations/helm.md
@@ -0,0 +1,189 @@
+---
+title: "Helm Integration"
+description: "Manage Helm releases and repositories with Codefresh pipelines"
+group: integrations
+toc: true
+---
+
+Codefresh is one of the few DevOps platforms with native support for Helm releases and deployments.
+In addition to the [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) available to all Codefresh accounts, you can add any external Helm repository to Codefresh through integrations.
+
+Native support for Helm in Codefresh includes:
+ * A pipeline [step for deploying Helm applications]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/)
+ * A dashboard for your [Helm charts]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/)
+ * A dashboard for your [Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/)
+ * A dashboard for [promoting Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) between different environments
+ * A dashboard for [Helm environments]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/)
+
+The built-in Helm repository is production ready. You can start using Helm right away with your Codefresh account,
+even if you don't have an external Helm repository. See our [quick start guide for Helm]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) or the [complete Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/).
+
+For each Helm integration, you can toggle the level of access by [non-admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators).
+
+## Set up external Helm integration
+
+1. 
In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select **Helm** and then click **Configure**.
+1. From the **Add Helm Repository** dropdown, select the type of external Helm repository:
+  * [Azure Registry](#azure-registry-helm-repository-settings)
+  * [Azure Registry MI](#azure-registry-with-managed-identity-mi-helm-repository-settings)
+  * [Azure Registry SP](#azure-registry-with-service-principal-sp-helm-repository-settings)
+  * [Codefresh](#helm-repository-from-another-codefresh-account)
+  * [Google Cloud Storage](#google-cloud-storage-gcs-helm-repository-settings)
+  * [HTTP Basic Authentication](#http-basic-authentication-settings)
+  * [Amazon AWS S3](#amazon-aws-s3-helm-repository-settings)
+
+
+1. To restrict access to only Codefresh admins, toggle **Allow access to all users** to OFF.
+  >When access is restricted, users **cannot** use the [CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} or [API]({{site.baseurl}}/docs/integrations/codefresh-api/) to [programmatically access this Helm repository](https://codefresh-io.github.io/cli/contexts/){:target="\_blank"}.
+  Otherwise, all users from all your Codefresh teams will be able to access this Helm repository with CLI commands or API calls.
+
+
+
+### HTTP Basic Authentication settings
+
+You can connect to your external repository with HTTP Basic authentication.
+The table below describes the settings.
+
+Setting|Description
+---|---
+**Helm Repository Name**|The unique name of the integration, used to reference it in `codefresh.yaml`.
+**Repository URL**|The URL of the Helm repository, with the `http://` protocol prefix.
+**Helm Repo Username**|The username to authenticate with.
+**Helm Repo Password**|The password for the username provided.
+
+### Amazon AWS S3 Helm repository settings
+
+You can connect to an Amazon AWS S3 Helm repository. Supply the AWS authentication credentials as you would for the AWS CLI, or the S3 plugin for Helm. For details, see [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html){:target="\_blank"}.
+
+The table below describes the settings.
+
+Setting|Description
+---|---
+**Helm Repository Name**|The unique name of the integration, used to reference it in `codefresh.yaml`.
+**Helm Repository URL**|The URL of the Helm repository, in the format `s3://bucketname`.
+**AWS Access Key ID**|The ID of the key with permissions for the S3 bucket.
+**AWS Secret Access Key**|The secret of the key with permissions for the S3 bucket.
+**AWS Default Region**|The region where the S3 bucket is located.
+
+
+### Google Cloud Storage (GCS) Helm repository settings
+
+You can connect to a Google Cloud Storage (GCS) Helm repository. Supply the GCS authentication credentials as you would for the gcloud CLI, or the GCS plugin for Helm. For details, see [Creating Service Account](https://cloud.google.com/docs/authentication/getting-started){:target="\_blank"}.
+
+The table below describes the settings.
+
+Setting|Description
+---|---
+**Helm Repository Name**|The unique name of the Helm repository integration, used to reference it in `codefresh.yaml`.
+**Helm Repository URL**|The URL of the Helm repository, in the format `gs://bucketname`.
+**Google Application Credentials JSON**|The JSON content with the credentials of the service account. 
+
+
+### Azure Registry Helm repository settings
+
+**Prerequisites**
+1. [Create the Helm repository](https://docs.microsoft.com/en-us/azure/container-registry/container-registry-helm-repos){:target="\_blank"} in Azure.
+1. Click **Authenticate**.
+1. In the permissions dialog, to allow Codefresh to access the Azure services, click **Accept**.
+
+>Make sure that you are using an organizational/company Azure account, and not a personal one. We are currently working with Microsoft to improve this integration.
+
+**Settings**
+
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/helm/select-azure-helm-repository.png"
+url="/images/integrations/helm/select-azure-helm-repository.png"
+alt="Selecting an Azure Helm repository"
+caption="Selecting an Azure Helm repository"
+max-width="70%"
+%}
+
+Setting|Description
+---|---
+**Subscriptions**|Select your Azure subscription.
+**Registry**|The Helm repository to connect to.
+
+>If you are already authenticated to Azure, and cannot find your Helm repository in the list, try revoking access, and authenticating again.
+
+
+### Azure Registry with Service Principal (SP) Helm repository settings
+
+An alternative method of adding an Azure Helm repository is by using a service principal.
+
+**Prerequisites**
+* [Create a service principal in the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal){:target="\_blank"}.
+
+
+
+**Settings**
+1. Click **Authenticate**.
+1. Enter the following:
+  * **Client ID**
+  * **Tenant**
+  * **Client secret**
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/helm/add-azure-helm-spn.png"
+url="/images/integrations/helm/add-azure-helm-spn.png"
+alt="Azure Service Principal details"
+caption="Azure Service Principal details"
+max-width="60%"
+ %}
+
+{:start="3"}
+1. Click **Authenticate**. Assuming that the authentication is successful, you can view the available Azure registries that can be used as a Helm repository.
+
+
+
+
+### Helm repository from another Codefresh account
+
+You can also add the private Helm repository of another Codefresh user as your integration.
+
+>We **don't** recommend sharing the Codefresh Helm repository between accounts. The built-in Helm repository of each account is best used as the private Helm repository of that account. See more details on [how to make your private Helm repository public]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/#repo-access-level).
+
+The table below describes the settings.
+
+Setting|Description
+---|---
+**Helm Repository Name**|The unique name of the Helm repository integration, used to reference it in `codefresh.yaml`.
+**Helm Repository URL**|The URL of the Helm repository, in the format `cm://repository-name`.
+**CF API Key**|A token [to access the other Codefresh account]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions). 
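+
+Whichever repository type you connect, pipelines typically consume the integration through the Helm step, with the Helm repository integration attached to the pipeline. The sketch below is only a minimal illustration; the chart, release, and cluster context names are placeholders, and the complete syntax is described in [How to use Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/).
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+  deploy_with_helm:
+    title: Deploying Helm chart
+    type: helm
+    arguments:
+      action: install
+      chart_name: my-app            # placeholder chart from the connected repository
+      release_name: my-app          # placeholder release name
+      helm_version: 3.2.4           # Helm version to use
+      kube_context: my-cluster      # name of a cluster connected to Codefresh
+{% endraw %}
+{% endhighlight %}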
+ + +## Related articles +[Private external Helm repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +[How to use Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) +[Helm best practices]({{site.baseurl}}/docs/deployments/helm/helm-best-practices/) + diff --git a/_docs/integrations/jenkins-integration.md b/_docs/integrations/jenkins-integration.md new file mode 100644 index 000000000..157806ac8 --- /dev/null +++ b/_docs/integrations/jenkins-integration.md @@ -0,0 +1,907 @@ +--- +title: "Jenkins integration/migration" +description: "Migration from Jenkins to Codefresh pipelines" +group: integrations +redirect_from: + - /docs/jenkins-integration/ +toc: true +--- + +Codefresh offers a superset of the capabilities offered by Jenkins, and therefore you can fully replace a Jenkins solution using only Codefresh on its own. + +During the migration period, it is very easy to make both solutions work together. This allows you to move gradually new CI/CD tasks to Codefresh and still keep the existing functionality in Jenkins jobs. + +## Calling Codefresh pipelines from Jenkins Jobs + +This is the most common scenario during the migration period. The CI part (code packaging) is still in Jenkins, while the CD part (actual deployments) happen with Codefresh. + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/calling-codefresh-from-jenkins.png" +url="/images/integrations/jenkins/calling-codefresh-from-jenkins.png" +alt="Calling a Codefresh pipeline from a Jenkins Job" +caption="Calling a Codefresh pipeline from a Jenkins Job" +max-width="100%" +%} + +1. For Jenkins to connect to your Codefresh account, create [a Codefresh API token]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions). +1. Enter the token in Jenkins [as a global Credential](https://jenkins.io/doc/book/using/using-credentials/){:target="\_blank"}. + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/jenkins-credentials.png" +url="/images/integrations/jenkins/jenkins-credentials.png" +alt="Storing the Codefresh API token in Jenkins" +caption="Storing the Codefresh API token in Jenkins" +max-width="60%" +%} + +Now you can create any declarative or scripted Jenkins pipeline that uses the token and the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} to call Codefresh pipelines from Jenkins. 
+ +Here is a very simple example: + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +pipeline { + agent { + docker { + image 'codefresh/cli:latest' + args '--entrypoint=""' + } + } + environment { + CODEFRESH_API_TOKEN= credentials('codefresh-token') + } + stages { + stage('Calling Codefresh pipeline') { + steps { + sh 'codefresh auth create-context --api-key $CODEFRESH_API_TOKEN' + sh 'codefresh run my-first-project/basic-build -t my-trigger -b master' + } + } + } +} +{% endraw %} +{% endhighlight %} + +Run the Jenkins job, and it also triggers a Codefresh pipeline: + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/call-a-codefresh-pipeline.png" +url="/images/integrations/jenkins/call-a-codefresh-pipeline.png" +alt="Calling a Codefresh pipeline in a Jenkins step" +caption="Calling a Codefresh pipeline in a Jenkins step" +max-width="30%" +%} + +In the logs of the Jenkins job, you will see the Codefresh logs (the Codefresh CLI automatically shows logs in standard output). + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/codefresh-logs-from-jenkins.png" +url="/images/integrations/jenkins/codefresh-logs-from-jenkins.png" +alt="Viewing Codefresh logs from Jenkins" +caption="Viewing Codefresh logs from Jenkins" +max-width="60%" +%} + +Of course, if you visit the Codefresh UI you will also see the [running pipeline]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/). +With this kind of integration, it is very easy to create Jenkins Jobs that compile/package code and add a step in the jobs +that calls Codefresh for deployment. + + +## Calling Jenkins jobs from Codefresh pipelines + +This is the opposite scenario. As you move more functionality into Codefresh, it might make sense to have a Codefresh pipeline that actually calls Jenkins jobs for tasks that are not migrated yet. + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/calling-jenkins-from-codefresh.png" +url="/images/integrations/jenkins/calling-jenkins-from-codefresh.png" +alt="Calling a Jenkins Job from a Codefresh pipeline" +caption="Calling a Jenkins Job from a Codefresh pipeline" +max-width="100%" +%} + +* For Codefresh to authenticate to your Jenkins instance, from **User Settings** in the Jenkins UI, create a [Jenkins API token](https://jenkins.io/blog/2018/07/02/new-api-token-system/){:target="\_blank"}. + Give your token any name that reminds you of its purpose. The name itself is arbitrary. 
+ +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/jenkins-api-token.png" +url="/images/integrations/jenkins/jenkins-api-token.png" +alt="Jenkins API token" +caption="Jenkins API token" +max-width="60%" +%} + +Once you have the token, you can use the [Codefresh plugin for triggering Jenkins Jobs](https://github.com/codefresh-io/plugins/blob/new-pipeline/plugins/run-jenkins-job/README.md){:target="\_blank"} in any pipeline +like this: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + RunJenkins: + title: Triggering Jenkins Job + image: codefresh/cf-run-jenkins-job + environment: + - JENKINS_URL=${{JENKINS_URL}} + - JENKINS_USER=${{JENKINS_USERNAME}} + - JENKINS_TOKEN=${{JENKINS_TOKEN}} + - JENKINS_JOB=${{JENKINS_JOB}} +{% endraw %} +{% endhighlight %} + +The value of the variables can be stored either in your [Codefresh shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) or directly [in the pipeline]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines): + + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/jenkins-variables.png" +url="/images/integrations/jenkins/jenkins-variables.png" +alt="Jenkins Job variables" +caption="Jenkins Jobs variables" +max-width="40%" +%} + +Launching the Codefresh pipeline also triggers the remote Jenkins Job. + + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/trigger-remote-jenkins-job.png" +url="/images/integrations/jenkins/trigger-remote-jenkins-job.png" +alt="Trigger remote Jenkins Job" +caption="Trigger remote Jenkins Job" +max-width="60%" +%} + +It is possible to mix both scenarios at the same time (Codefresh pipelines that call Jenkins Jobs and vice-versa). + +## Migrating from Jenkins to Codefresh + +Now that you know how to mix pipelines from both platforms, it is helpful to understand how you can migrate all your Jenkins Jobs to Codefresh. In most cases, several actions that require custom scripts in Jenkins (or plugins/shared libraries) are already integrated into the core Codefresh platform. Here are some high-level differences between the two platforms: + +{: .table .table-bordered .table-hover} +| Feature | Jenkins | Codefresh | +| -------------- | ---------------------------- |-------------------------| +| Architecture | VM based | [container-based]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/)| +| Pipeline definition | Groovy | [YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) | +| Tool installation | Installed on build node | dynamically launched | +| Plugin mechanism | Java/Groovy using Jenkins API | [Docker image](https://codefresh.io/steps/){:target="\_blank"} (any programming language)| +| Plugin installation | Central (requires admin access) | Per pipeline (no admin access needed) | +| Docker agent builds | Extra plugin | Built-in | +| Kubernetes agent builds | Extra plugin | Built-in | +| Docker commands | Manually run in pipelines | Built-in pipeline steps | +| Access to Kubectl | External plugin | Built-in | +| Kubernetes deployments | External plugin | Built-in | + +It is important to understand that when you are switching to Codefresh you get a set of higher level abstraction for your builds: + +* Unlike Jenkins, Codefresh automatically has a distributed fleet of build nodes and manages all builds on its own. +* With Codefresh you don't need to install anything on the build nodes (in fact you don't even have SSH access on them). 
All build tools are automatically launched in pipelines as Docker images.
+* In Codefresh, you can use the same tools with different versions in the same pipeline, without any special configuration (for example, use Java 5 and Java 8 in the same pipeline).
+* Codefresh plugins are used per pipeline by simply defining them. There is nothing to install centrally (such as Jenkins plugins or shared libraries). Different teams can use different tools in their pipelines without affecting each other.
+* Codefresh plugins are just Docker images with predefined inputs/outputs. They can be programmed in any programming language (not just Java/Groovy) and are not tied to Codefresh in any way (i.e. there is no need to know the Codefresh API to write a Codefresh plugin).
+* Jenkins pipelines can be free-style (VM-based), scripted (VM/container-based) or declarative (VM/container-based), meaning that there are at least five ways to write your pipeline. In Codefresh there is only one way (declarative/container-based).
+* Jenkins pipelines are connected to a single git repository. Codefresh pipelines can be connected to multiple [git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) which
+themselves are connected to git repositories. Therefore a Codefresh pipeline can be reused for multiple projects.
+* Specifically for building Docker images, Codefresh can automatically connect to any [external Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/).
+* Specifically for Kubernetes deployments, Codefresh automatically sets up `kubectl` access in pipelines [from connected clusters]({{site.baseurl}}/docs/integrations/kubernetes/#add-kubernetes-cluster/). There is no configuration needed to achieve this behavior. Codefresh also has several [built-in ways for Kubernetes deployments]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) and a [dedicated UI dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) to see what your cluster is doing.
+* Specifically for Helm deployments, Codefresh includes a private Helm repository and several [Helm dashboards]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/).
+
+
+### Migrating Jenkins freestyle jobs
+
+If you have freestyle Jenkins Jobs (or are still using Jenkins 1.x), it is very easy to migrate your builds to Codefresh.
+In Jenkins, you are accustomed to:
+
+1. Installing a programming tool on the Jenkins node.
+1. Calling it directly in a build step.
+
+In Codefresh, a similar process would be the following:
+
+1. Find a Docker image on Docker Hub, or create one yourself, that has the tools that you need.
+1. Use [a freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/), and run the exact same command in a Codefresh pipeline.
+
+For example, this Jenkins job...
+
+{% include image.html
+lightbox="true"
+file="/images/integrations/jenkins/jenkins-freestyle-job.png"
+url="/images/integrations/jenkins/jenkins-freestyle-job.png"
+alt="Jenkins freestyle job"
+caption="Jenkins freestyle job"
+max-width="60%"
+%}
+
+
+...can be easily converted to a Codefresh pipeline like this:
+
+`codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+steps:
+  my_jar_compilation:
+    title: Compile/Unit test
+    image: maven:3.5.2-jdk-8-alpine
+    commands:
+    - mvn package
+  my_node_app:
+    title: Running unit tests
+    image: node:11
+    commands:
+    - npm run test
+{% endhighlight %}
+
+Unlike Jenkins, Codefresh does **not** need any global installation of tools beforehand. 
+ +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/jenkins-tool-installation.png" +url="/images/integrations/jenkins/jenkins-tool-installation.png" +alt="Jenkins Tool installation - not needed with Codefresh" +caption="Jenkins Tool installation - not needed with Codefresh" +max-width="40%" +%} + +In Codefresh, you can just use one or more Docker images in your pipeline. The tool versions will be launched only while the pipeline is active. Once the pipeline is finished, all Docker images that took part in it are discarded. The Codefresh build node has only Docker installed and nothing else. +This means that you can easily mix and match tool versions. + +Here is a Codefresh pipeline that uses multiple versions of Java and Node. + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + PackageMyNode1App: + title: Packaging Node application 1 + stage: packaging + image: node:11.1 + working_directory: ./dashboard + commands: + - echo "My Node version is" + - node --version + - npm install + PackageMyNode2App: + title: Packaging Node application 2 + stage: packaging + image: node:9.3.0-slim + working_directory: ./website + commands: + - echo "My Node version is" + - node --version + - npm install + RunUnitTests: + title: Running Unit tests + image: maven:3.6.1-jdk-11 + working_directory: ./backend + commands: + - java -version + - mvn test + PackageBackend: + title: Compile/Unit test + image: maven:3.5.2-jdk-8-alpine + working_directory: ./backend + commands: + - java -version + - mvn package -Dmaven.test.skip +{% endhighlight %} + +Meanwhile, another team might have a different pipeline using different versions of Maven and/or Java. Each team can decide on the exact version needed just by changing the [Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/). + +It should be easy to see now that by migrating from Jenkins to Codefresh, several problems in Jenkins are simply eliminated: + +* You do not need to be an admin to install programming tools anymore, +* Tools are defined per pipeline instead of being preloaded centrally, +* Multiple versions of the same tool can be used on the same pipeline (or different pipelines), +* Upgrading a tool to a new version is trivial (just change the docker tag in the freestyle step), +* You don't need to label build nodes anymore with specific labels that show which tools they contain, +* All public Dockerhub images can be used *with zero changes* in a Codefresh step. + +Notice that for several popular tools, Dockerhub already contains several images. 
+ +* [Maven](https://hub.docker.com/_/maven){:target="\_blank"} +* [Gradle](https://hub.docker.com/_/gradle/){:target="\_blank"} +* [Node](https://hub.docker.com/_/node/){:target="\_blank"} +* [Python](https://hub.docker.com/_/python/){:target="\_blank"} +* [Terraform](https://hub.docker.com/r/hashicorp/terraform/){:target="\_blank"} +* [Packer](https://hub.docker.com/r/hashicorp/packer/){:target="\_blank"} +* [Sonar](https://hub.docker.com/r/skilldlabs/sonar-scanner/){:target="\_blank"} +* [Nexus](https://hub.docker.com/r/sjeandeaux/nexus-cli/){:target="\_blank"} +* [Helm](https://hub.docker.com/r/codefresh/kube-helm/tags){:target="\_blank"} +* [Kubectl](https://hub.docker.com/r/codefresh/kubectl/){:target="\_blank"} +* [gcloud](https://hub.docker.com/r/google/cloud-sdk/){:target="\_blank"} +* [Ansible](https://hub.docker.com/u/ansible){:target="\_blank"} +* [Azure CLI](https://hub.docker.com/r/microsoft/azure-cli/){:target="\_blank"} +* [AWS CLI](https://hub.docker.com/r/mesosphere/aws-cli/){:target="\_blank"} + +Of course, you can create your own Docker image with the exact tools that you want and then use it from the [any Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) or any other registry in your pipeline. + +### Migrating Jenkins pipelines + +In the case of Jenkins pipelines, things are a bit more complicated because there is not a single way anymore on how to structure your pipelines. +First of all, the best-case scenario is when you have *declarative* Jenkins pipelines that already use Docker images for stage execution. + + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +pipeline { + agent none + stages { + stage('Example Build') { + agent { docker 'maven:3-alpine' } + steps { + echo 'Hello, Maven' + sh 'mvn --version' + } + } + stage('Example Test') { + agent { docker 'openjdk:8-jre' } + steps { + echo 'Hello, JDK' + sh 'java -version' + } + } + } +} +{% endraw %} +{% endhighlight %} + +In this case, there is a 1-1 mapping between Jenkins stages and Codefresh steps as you can simply convert each stage into a Codefresh step using the respective Docker image. + +The Jenkins pipeline above can be converted to a Codefresh pipeline with a series of freestyle steps: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_first_step: + title: Example Build + image: maven:3-alpine + commands: + - echo 'Hello, Maven' + - mvn --version + my_second_step: + title: Example Test + image: openjdk:8-jre + commands: + - echo 'Hello, JDK' + - java -version +{% endhighlight %} + +The final Codefresh pipeline will even look like the original Jenkins one. + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/migrate-jenkins-pipeline.png" +url="/images/integrations/jenkins/migrate-jenkins-pipeline.png" +alt="Direct migration from Jenkins to Codefresh" +caption="Direct migration from Jenkins to Codefresh" +max-width="80%" +%} + +If you don't use Docker containers in your Jenkins pipeline, then you need to follow the same advice as the previous section (i.e. find a Docker image that has the tools you need and create your own freestyle step in Codefresh). + +### Checking out source code + +In Jenkins 1.x pipelines, code is automatically checked out by Jenkins when the pipeline starts. 
In Jenkins 2.x pipelines you are free to insert your own git steps inside a Job: + + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +node { + stage('First Project') { + git 'https://github.com/nodegui/react-nodegui.git' + } + stage('Second Project') { + git url: 'https://github.com/jglick/simple-maven-project-with-tests.git' + branch: 'master' + } +} +{% endraw %} +{% endhighlight %} + +Codefresh has a dedicated [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) that can be used in a similar manner. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + first_project: + type: 'git-clone' + description: 'Cloning first project...' + repo: 'nodegui/react-nodegui' + revision: '${{CF_BRANCH}}' + git: github + second_project: + type: 'git-clone' + description: 'Cloning second...' + repo: 'jglick/simple-maven-project-with-tests' + revision: 'master' + git: github +{% endraw %} +{% endhighlight %} + +You don't need to define any credentials or tokens, as they are already defined centrally in the [git configuration screen]({{site.baseurl}}/docs/integrations/git-providers/). The `CF_BRANCH` variable is one of the [built-in Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/) that shows the branch that was used by the git commit as it came from the [trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) attached to the pipeline. + +You can also [manually run Git commands]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout-custom/) in Codefresh pipelines. + +### Step conditions + +In several cases you want to add conditionals on steps such as the branch that is being compiled: + + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +pipeline { + agent any + stages { + stage('Example Build') { + steps { + echo 'Hello World' + } + } + stage('Example Deploy') { + when { + branch 'master' + } + steps { + echo 'Deploying' + } + } + } +} +{% endraw %} +{% endhighlight %} + +This can be also configured in Codefresh using [step conditionals]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/): + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + title: Example Build + image: alpine:latest + commands: + - echo 'Hello World' + my_second_step: + title: Example Deploy + image: alpine:latest + commands: + - echo 'Deploying' + when: + branch: + only: + - master +{% endraw %} +{% endhighlight %} + +You can define much more complex conditions using the [Codefresh expression language]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/#condition-expression-syntax). + + +### Migrating Jenkins credentials + +Codefresh contains a central repository for user variables and secrets in the form of [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). You can also +inject variables on a specific project or a specific pipeline. + +All injected variables *are automatically available to all Codefresh freestyle steps*. You don't need a special syntax or directive to enable this behavior (unlike Jenkins where you have to use a `withCredentials` block or something similar). 
+ +In Jenkins you have to explicitly ask for a secret: + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +pipeline { + agent any + stages { + stage('Example') { + environment { + AWS_ACCESS_KEY_ID = credentials('jenkins-aws-secret-key-id') + AWS_SECRET_ACCESS_KEY = credentials('jenkins-aws-secret-access-key') + } + steps { + sh 'printenv' + } + } + } +} +{% endraw %} +{% endhighlight %} + +In Codefresh if you setup your variables in the pipeline settings, then the pipeline itself needs nothing special. + +{% include image.html +lightbox="true" +file="/images/integrations/jenkins/pipeline-variables.png" +url="/images/integrations/jenkins/pipeline-variables.png" +alt="Pipeline variables" +caption="Pipeline variables" +max-width="80%" +%} + +You can simply run the pipeline on its own: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + title: Example1 + image: alpine:latest + commands: + - printenv # Injected variables are always available + my_second_step: + title: Example2 + image: alpine:latest + commands: + - echo $AWS_ACCESS_KEY_ID + - echo $AWS_SECRET_ACCESS_KEY +{% endraw %} +{% endhighlight %} + +If you want to use a different name for each variable/secret then you can simply assign them to your desired names: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + title: Example + image: alpine:latest + environment: + - MY_KEY=${{AWS_ACCESS_KEY_ID}} + - MY_ACCESS_KEY=${{AWS_SECRET_ACCESS_KEY}} + commands: + - echo $MY_KEY + - echo $MY_ACCESS_KEY +{% endraw %} +{% endhighlight %} + +In the example above, even though the secrets are already available as environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`, we instead pass them to the pipeline step as `MY_KEY` and `MY_ACCESS_KEY`. + + +### Migrating Jenkins shared libraries + +Creating a Jenkins shared library has a lot of challenges: + +* The library must be written in Groovy/Java, +* The library must use the Jenkins API, +* It is very hard to write unit tests for it, +* Installing it requires admin access in the Jenkins master. + +Codefresh plugins, on the other hand, are just Docker images written in any programming language. + +First of all, look at [Docker Hub](https://hub.docker.com/){:target="\_blank"} and see if there is already a utility or CLI that has the same functionality with your +shared library. Codefresh also has a [free marketplace](https://codefresh.io/steps/){:target="\_blank"} for pipeline steps (which are Docker images essentially). + +As a last resort, you need to rewrite your shared library and convert it to a Docker image. The process is the following: + +1. Start from a base image that contains Groovy and any other tool you need. +1. Convert your shared library to a single Groovy executable that reads input from environment variables and/or files and writes output data to files. +1. Remove all Jenkins specific APIs. +1. Package the image with a simple Dockerfile that just compiles your executable. + +Note that there is nothing Codefresh specific to the end-result. You should end up with a standard Docker image that would be usable in any environment that has Docker installed. + +Once you have that image you can use it like any other Codefresh freestyle step as described in the previous section. + +### Migration of Jenkins pipelines that create Docker images + +Codefresh has native support for: + +1. Building Docker images +1. Running commands inside Docker images +1. 
Pushing Docker images to different registries + +If you are using Docker commands directly in your Jenkins file, or prefer to take advantage of the scripted +variant of Docker image management then you can easily convert both approaches to Codefresh YAML like below: + +#### Building Docker images + +The most basic Docker operation is building an image. You will need a Dockerfile and a directory to use as build context (usually the same folder that contains the Dockerfile). + +`docker command` +``` +docker build . -t my-app-image:1.0.1 +``` + +Or if you use Jenkins scripted pipelines... + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +node { + docker.build("my-app-image:1.0.1") + docker.build("test-image", "./tests") +} +{% endraw %} +{% endhighlight %} + +...they will become in Codefresh the following [build steps]({{site.baseurl}}/docs/pipelines/steps/build/): + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + title: Building My App image + type: build + image: my-app-image + tag: 1.0.1 + my_second_step: + title: Building My Test image + type: build + image: test-image + working_directory: "./tests" +{% endraw %} +{% endhighlight %} + +#### Running commands in created containers + +Sometimes you want to run commands inside a Docker image. Either to have access to specialized tools or to run tests. + +`docker command` +``` +docker build . -t my-app-image:1.0.1 +docker run my-app-image:1.0.1 npm install +``` + +Or, if you use Jenkins scripted pipelines... + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +node { + def customImage = docker.build("my-app-image:1.0.1") + + customImage.inside { + sh 'npm install' + } +} +{% endraw %} +{% endhighlight %} + +... they will become in Codefresh the following [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + title: Building My App image + type: build + image: my-app-image + tag: 1.0.1 + my_second_step: + title: Install Node dependencies + image: ${{my_first_step}} + commands: + - npm install +{% endraw %} +{% endhighlight %} + +Notice that the second pipeline step actually mentions the first one by name as a running context. + +#### Pushing Docker images + +Notice that in Codefresh [all connected registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) are automatically available to all pipelines. +You don't need special directives such as `withRegistry`. All registries can be mentioned +by their name in any push step (and will be used automatically for pulls when an image uses their domain). + +`docker command` +``` +docker build . -t my-app-image:1.0.1 +docker tag my-app-image:1.0.1 registry.example.com/my-app-image:1.0.1 +docker push registry.example.com/my-app-image:1.0.1 +``` + +Or, if you use Jenkins scripted pipelines... + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +node { + def customImage = docker.build("my-app-image:1.0.1") + + docker.withRegistry('https://registry.example.com', 'example-registry-credentials') { + customImage.push("1.0.1") + customImage.push("latest") + } +} +{% endraw %} +{% endhighlight %} + +...they will become in Codefresh the following [push steps]({{site.baseurl}}/docs/pipelines/steps/push/). 
+ +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + title: Building My App image + type: build + image: my-app-image + tag: 1.0.1 + my_second_step: + title: Pushing image + type: push + candidate: ${{my_first_step}} + tags: + - 1.0.1 + - latest + registry: example-registry +{% endraw %} +{% endhighlight %} + +Notice again that the second step pushes the image created by the first one. + +##### Complete Docker pipeline + +Here is a full example with a pipeline that builds an image, runs tests, and pushes it to Dockerhub. + + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +node { + def app + + stage('Clone repository') { + checkout scm + } + + stage('Build image') { + app = docker.build("my-app-image:1.0.1") + } + + stage('Test image') { + app.inside { + sh 'npm run test' + } + } + + stage('Push image') { + docker.withRegistry('https://registry.hub.docker.com', 'docker-hub-credentials') { + app.push("1.0.1") + app.push("latest") + } + } +} +{% endraw %} +{% endhighlight %} + +Here is the same pipeline in Codefresh: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + type: "git-clone" + description: "Clone repository" + repo: "my-account/my-git-repo" + revision: "${{CF_BRANCH}}" + git: github + my_first_step: + title: Build image + type: build + image: my-app-image + tag: 1.0.1 + my_second_step: + title: Test image + image: ${{my_first_step}} + commands: + - npm run test + my_third_step: + title: Push image + type: push + candidate: ${{my_first_step}} + tags: + - 1.0.1 + - latest + registry: dockerhub +{% endraw %} +{% endhighlight %} + +Notice that Codefresh has much more context regarding Docker registries and credentials. The same approach +is followed with Kubernetes deployments as we will see in the next section. + + +### Migration of Jenkins pipelines that deploy to Kubernetes + +Codefresh has first-class support for Kubernetes deployments. Codefresh can deploy on its own [using different options]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) and no external tools (i.e. Ansible or `kubectl`) are needed. + +Specifically for [Kubernetes]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) and [Helm]({{site.baseurl}}/docs/new-helm/using-helm-in-codefresh-pipeline/), Codefresh has declarative pipeline steps: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + RunningDeploy: + title: Kubernetes Deployment + type: deploy + kind: kubernetes + cluster: myDemoGKEcluster + namespace: production + service: my-service + candidate: + image: 'my-app-image:1.0.1' + registry: 'dockerhub' + DeployMyChart: + image: 'codefresh/cfstep-helm:2.9.1' + environment: + - CHART_REF=charts/python + - RELEASE_NAME=mypython-chart-prod + - KUBE_CONTEXT=myDemoAKSCluster +{% endraw %} +{% endhighlight %} + +As with Docker registries (described in the previous section), Codefresh makes all [added Kubernetes clusters]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) available to all pipelines. You don't need any special plugin or directive (such as `withKubeConfig`) to work with Kubernetes clusters in Codefresh. You can see that the Codefresh pipeline simply mentions Kubernetes clusters and registries without any credential information. + +Of course, it is also very easy to convert any existing Jenkins pipeline by just using any image that contains the `kubectl` executable. 
+ + `Jenkinsfile` +{% highlight groovy %} +{% raw %} +node { + stage('Apply Kubernetes files') { + withKubeConfig([credentialsId: 'user1', serverUrl: 'https://api.k8s.my-company.com']) { + sh 'kubectl apply -f my-kubernetes-directory' + } + } +} +{% endraw %} +{% endhighlight %} + +Codefresh will automatically setup Kube config access to the pipeline behind the scenes. Zero configuration is needed for this behavior. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl + commands: + - kubectl config use-context "my-k8s-my-company" + - kubectl apply -f my-kubernetes-directory +{% endraw %} +{% endhighlight %} + +Once you use Codefresh for your deployments you also get access to: + +* The [Kubernetes Dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +* A free [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) with each Codefresh account +* The [Helm chart dashboard]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) +* The [Helm Release dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) +* The [Helm environment dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) + +For some easy templating see also the [cf-deploy]({{site.baseurl}}/docs/deployments/ci-cd-guides/kubernetes-templating/) plugin. + +## Related articles +[Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/) +[Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} +[Creating Codefresh pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) + + + + diff --git a/_docs/integrations/jira.md b/_docs/integrations/jira.md new file mode 100644 index 000000000..34bb27b66 --- /dev/null +++ b/_docs/integrations/jira.md @@ -0,0 +1,133 @@ +--- +title: "Jira pipeline integration" +description: "" +group: integrations +redirect_from: + - /docs/jira-integration-1/ + - /docs/integrations/jira-integration-1/ +toc: true +--- +Codefresh integrates with Jira in several ways. This article describes how to integrate with Jira in Codefresh for the highest visibility into your GitOps deployments. +Alternatively, you can connect to Jira: +* Through a [custom step]({{site.baseurl}}/docs/integrations/notifications/jira-integration/#use-jira-within-your-codefresh-pipeline) from our step marketplace so that you can connect your pipelines with Jira +* By using your own [jira-cli]({{site.baseurl}}/docs/integrations/notifications/jira-integration/#using-your-own-jira-cli) + +## Prerequisites +* [Codefresh Account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) +* [Jira Account](https://www.atlassian.com/software/jira){:target="\_blank"} + +## Set up Jira integration in Codefresh + + +When you add a new Jira integration in Codefresh, you can authenticate either using the: +* [Codefresh Marketplace App]({{site.baseurl}}/docs/integrations/notifications/jira-integration/#authenticate-with-the-jira-client-key) + We recommended setting up your Jira integration through our Marketplace App. + > Note that Codefresh currently has to provide you with access to use the Jira Marketplace App. Please get in touch for more information. + +* [Jira Account Details]({{site.baseurl}}/docs/integrations/notifications/jira-integration/#provide-account-details) + +
            + +**Before you begin** + +* To authenticate through the Marketplace App: + * Get the [Organization URL and the Client Key for the Codfresh App from Jira](#jira-integration-with-marketplace-app) + +**How to** + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Atlassian Jira** and then click **Configure**. +1. Click **Add Jira**. +1. In the **Integration Name** field, enter a name for the integration which is used to reference it in `codefresh.yaml`. +1. To restrict access to only Codefresh admins, toggle **Allow access to all users** to OFF. + +1. To integrate with the Codefresh Marketplace App, click **Jira Marketplace App**: + * Paste the **Organization URL** and the **Client Key** you generated for the Codefresh App in Jira. +1. To integrate with your Jira account details, select **User/Pass**, and define the following: + * **Jira URL**: The URL of your organization, for example, `https://company-name.atlassian.net’. + * **Username**: Your Jira username, usually the e-mail with which you are logged in to Jira. + * **Password**: Your Jira password, or alternatively, the Jira Client Key. + +{% include image.html +lightbox="true" +file="/images/integrations/jira/add-jira-password.png" +url="/images/integrations/jira/add-jira-password.png" +alt="Account Information" +max-width="90%" +%} + +{:start="7"} +1. For integration with Jira Marketplace App, to verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. + + + + +## Jira integration with Marketplace App +You need to generate the Organization URL and Client Key for the Codefresh application in Atlassian Jira. + + +1. In the Atlassian Marketplace, go to the [Codefresh Application](https://marketplace.atlassian.com/apps/1224560/codefresh){:target="\_blank"}. + {% include image.html + lightbox="true" + file="/images/integrations/jira/add-app.png" + url="/images/integrations/jira/add-app.png" + alt="Add Codefresh from Jira App Marketplace" + caption="Add Codefresh from Jira App Marketplace" + max-width="90%" + %} + +{:start="2"} +1. To install the application, click **Get it now**. When prompted, confirm the installation. + {% include image.html + lightbox="true" + file="/images/integrations/jira/confirm.png" + url="/images/integrations/jira/confirm.png" + alt="Confirm installation" + caption="Confirm installation" + max-width="90%" + %} + +{:start="3"} +1. When the installation has completed, in your Jira account, go to the **Apps** menu. +1. Click **Manage your apps**. + + {% include image.html + lightbox="true" + file="/images/integrations/jira/manage-apps.png" + url="/images/integrations/jira/manage-apps.png" + alt="Select Manage Apps within Your Jira Account" + caption="Select Manage Apps within Your Jira Account" + max-width="90%" + %} + +{:start="5"} +1. In **User-installed apps**, locate the Codefresh CI/CD platform integration. +1. Click **Configure**. + This will provide you with your Organization URL and the Client Key. + + {% include image.html + lightbox="true" + file="/images/integrations/jira/configure.png" + url="/images/integrations/jira/configure.png" + alt="Account information" + caption="Account information" + max-width="90%" + %} + + +{:start="7"} +1. Copy **Organization URL** and the **Client Key**. You will need these to set up Jira integration with the Codefresh Marketplace App. 
+ + + + +## Using the Jira Integration + +Once Jira is connected to your Codefresh account, you can use both platforms in combination and integrate Jira into your [GitOps workflow]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/). + +## Related articles +[Example for sending notifications to Jira]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-jira/) +[Examples for Codefresh pipelines]({{site.baseurl}}/docs/example-catalog/examples/) +[Create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/integrations/kubernetes.md b/_docs/integrations/kubernetes.md new file mode 100644 index 000000000..364c17511 --- /dev/null +++ b/_docs/integrations/kubernetes.md @@ -0,0 +1,741 @@ +--- +title: "Kubernetes pipeline integration" +description: "How Codefresh supports Kubernetes clusters" +group: integrations +toc: true +--- + +Codefresh is one of the few CI/CD solutions that has native support for Kubernetes clusters, not only for deploying applications to Kubernetes, but also for running pipelines on Kubernetes. + +Codefresh has native support for Kubernetes in the following areas: + +- [Connecting a cluster globally](#connect-a-kubernetes-cluster) +- [Viewing the cluster status]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +- [Viewing the environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +- [Deploying to a cluster with the GUI]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#deploying-a-new-service) +- [Deploying to a cluster with a pipeline]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) +- [Running pipelines on a cluster]({{site.baseurl}}/docs/installation/codefresh-runner/) + + + + +Codefresh offers its own Kubernetes dashboard that allows you to inspect the services and namespaces +in your cluster. To activate this dashboard, you need to connect your cluster to your Codefresh account first. + +## Connect a Kubernetes cluster + +### Prerequisites + +Codefresh SaaS needs network connectivity to connect to your cluster. +If your cluster is behind a firewall, make sure that you allow access to the [required IPs]({{site.baseurl}}/docs/administration/platform-ip-addresses/). + + +For On-premises and [Hybrid installations]({{site.baseurl}}/docs/administration/behind-the-firewall/), there is no need to tamper with your firewall. + +### Set up Kubernetes integration + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Kubernetes** and then click **Configure**. +1. From the **Add Provider** list, select the Kubernetes provider. + +{% include image.html + lightbox="true" + file="/images/integrations/codefresh-integrations.png" + url="/images/integrations/codefresh-integrations.png" + alt="Codefresh integrations" + caption="Codefresh integrations" + max-width="70%" + %} + + + +#### Adding a GKE Cluster +Adding a cluster in GKE can be done by clicking the **Add cluster** button under **Google Cloud Provider** and selecting the desired project and cluster. + +If this is your first time, you'll be prompted to authenticate using your Google credentials, make sure you're doing so with a user that have access to your GKE projects. + +For GKE cluster versions >=1.19 basic authentication is deprecated. 
You can add the GKE cluster manually by [using the custom Kubernetes integration option]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/#adding-any-other-cluster-type-not-dependent-on-any-provider) instead. + +{{site.data.callout.callout_info}} + +If you are a new customer of Google Cloud, you are also eligible to receive a Codefresh offer to get up to $500 in Google credits. As soon at the GKE integration is complete within Codefresh, you will get an email with extra details on how to claim your credits. + +Follow the link in the email to fill in an application for the free credits. Once Google approves the application (usually within 1-2 days) your credits will be available to your account. Make sure to check your spam folder for that email. + +{{site.data.callout.end}} + +{:.text-secondary} + +#### Adding an AKS cluster + +To add an Azure cluster, select *Azure AKS* from the drop-down menu instead of *Azure AKS SP*. Click the *Authenticate button* and enter your Azure credentials. You will see a description of all permissions that Codefresh needs +in order to access your cluster. Accept them and Codefresh will connect to Azure to get the cluster information. + +>If you experience difficulties at this point try logging into Azure first in your browser *before* clicking +the authenticate button. Also make sure that you are using an organizational/company Azure account and not a personal one. We are currently working with Microsoft to improve this integration. + +If everything is ready you will see a dialog that allows you to select your Azure subscription and the +cluster name that you wish to use. + +{% include image.html +lightbox="true" +file="/images/integrations/kubernetes/add-cluster/select-aks-cluster.png" +url="/images//integrations/kubernetes/add-cluster/select-aks-cluster.png" +alt="Selecting the Azure cluster" +caption="Selecting the Azure cluster" +max-width="60%" + %} + +Codefresh will query the cluster and show its nodes. You are now ready to [deploy to Azure kubernetes]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/). + +>If you wish for any reason to revoke the granted access from the Azure side, visit [https://account.activedirectory.windowsazure.com/r#/applications](https://account.activedirectory.windowsazure.com/r#/applications) and remove "Codefresh" from the list. + +#### Adding an AKS cluster with a service principal + +An alternative method of adding an Azure cluster is by using a service principal (*Azure AKS SP*). + + +**Before you begin** +* Follow the [instructions for creating a service principal in the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal){:target="\_blank"}. + +**How to** + +1. From the **Add Provider** list, select the **Azure AKS SP**. +1. Click the arrow on the right, and then click **Add Cluster**. +1. Enter the following: + * `Client ID` + * `Tenant` + * `Client secret` + +1. Click **Authenticate**. + +{% include image.html +lightbox="true" +file="/images/kubernetes/integrations/add-cluster/connect-azure-spn.png" +url="/images/kubernetes/integrations/add-cluster/connect-azure-spn.png" +alt="Azure Service principal details" +caption="Azure Service principal details" +max-width="60%" + %} + + + +Codefresh will query the cluster and show its nodes. You are now ready to [deploy to Azure kubernetes]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/). 
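+
+If you prefer to prepare the service principal from the command line instead of the Azure portal, the Azure CLI can create one for you. This is only a generic sketch: `codefresh-sp` is a placeholder name, and the role/scope you assign depends entirely on your own Azure subscription and cluster setup.
+
+`Create a service principal (example)`
+{% highlight shell %}
+{% raw %}
+# Creates a service principal and prints its credentials as JSON
+az ad sp create-for-rbac --name codefresh-sp
+{% endraw %}
+{% endhighlight %}
+
+The `appId`, `password`, and `tenant` values printed by the command correspond to the `Client ID`, `Client secret`, and `Tenant` fields described above.
+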
+ + +#### Adding EKS Cluster + +To add an Amazon EKS cluster, you must first obtain `kubectl` access to it. Follow the instructions for using the +[AWS CLI](https://aws.amazon.com/premiumsupport/knowledge-center/eks-cluster-connection/){:target="\_blank"} in order to obtain your kubeconfig locally. + +``` +aws eks --region region update-kubeconfig --name cluster_name +``` + +Once you have access via `kubectl` then follow the [instructions](#get-cluster-configuration-manually) to obtain all the cluster details. +To add the Amazon cluster, select *Amazon AWS* from the *ADD PROVIDER* drop-down menu and enter all details in the respective field in the Codefresh UI. + +#### Adding a DigitalOcean cluster + +DigitalOcean also offers a hosted solution for Kubernetes. + +To add a DO cluster select *DigitalOcean* from the *Add provider* menu in your [integration settings](https://g.codefresh.io/account-admin/account-conf/integration/kubernetes). Click the authenticate button and enter your DO account credentials: + +{% include image.html +lightbox="true" +file="/images/kubernetes/integrations/add-cluster/authorize-do.png" +url="/images/kubernetes/integrations/add-cluster/authorize-do.png" +alt="Authorizing DigitalOcean Integration" +caption="Authorizing DigitalOcean Integration" +max-width="35%" + %} + +Click on the checkbox next to your account name and select the *Authorize application* button. Codefresh has now access to your DigitalOcean cluster. You need to authenticate only once. + +{% include image.html +lightbox="true" +file="/images/kubernetes/integrations/add-cluster/do-authorized.png" +url="/images/kubernetes/integrations/add-cluster/do-authorized.png" +alt="DigitalOcean is now authorized" +caption="DigitalOcean is now authorized" +max-width="70%" + %} + +Next, expand the DigitalOcean row from the triangle icon on the right and click on the *Add cluster* button. The drop-down menu should contain all your DigitalOcean Kubernetes clusters. Select the one that you want to connect into Codefresh and click the *Add* button. + +{% include image.html +lightbox="true" +file="/images/kubernetes/add-cluster/add-do-cluster.png" +url="/images/kubernetes/add-cluster/add-do-cluster.png" +alt="Selecing the DigitalOcean cluster" +caption="Selecing the DigitalOcean cluster" +max-width="40%" + %} + +Your cluster is now connected. You should be able to see it your [Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) and start [deploying]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) on it. + +Note that you can als add a DigitalOcean cluster as a generic cluster as well (explained below). + + +#### Adding any other cluster type (not dependent on any provider) + + + +1. To add any other type of cluster, outside of GKE, from the **Add Provider** list, select the **Custom Providers**. + + + +{% include image.html +lightbox="true" +file="/images/integrations/kubernetes/add-cluster/add-cluster-button.png" +url="/images/integrations/kubernetes/add-cluster/add-cluster-button.png" +alt="Adding a custom K8s cluster in Codefresh" +caption="Adding a custom K8s cluster in Codefresh" +max-width="60%" + %} + +The integration between Codefresh and your Kubernetes cluster is API based, and relies on a Kubernetes service account of your choosing that will be used to manage the integration. + +The configurations you'll be required to add are: + + +1. Name: Any name of your choosing, that will represent your cluster context in Codefresh. 
Do not use spaces, dots or other strange characters in the name. +1. Host: The full URL of the Kubernetes API endpoints including protocol and port. +1. Certificate: The Kubernetes service account certificate used for the integration with Codefresh (base64 encoded). +1. Token: The Kubernetes service account token used for the integration with Codefresh (base64 encoded) +1. (Optional) Namespace: Restrict Codefresh [access to a specific namespace](#restrict-codefresh-access-to-a-specific-namespace) + + +{% include image.html + lightbox="true" + file="/images/integrations/kubernetes/add-cluster/add-cluster-fields.png" + url="/images/integrations/kubernetes/add-cluster/add-cluster-fields.png" + alt="Adding a custom cluster in Codefresh" + caption="Adding a custom cluster in Codefresh" + max-width="80%" + %} + +There is also a toggle for [private clusters behind a firewall]({{site.baseurl}}/docs/reference/behind-the-firewall/). + + In the section below, we'll provide you with easy instructions how to get all your cluster configurations in order to add it to Codefresh. + +### Get cluster configuration manually + +Codefresh accesses any custom cluster using a [service account](https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/){:target="\_blank"}. You can define the privileges Codefresh has on your cluster +using the standard authorization methods (i.e. [RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/){:target="\_blank"}) supported by your Kubernetes infrastructure. + +You need a terminal with `kubectl` access on your cluster. You can even use the "cloud shell" of your +cloud provider for this purpose. + +#### The easy and insecure way + +If you are evaluating Codefresh and want to connect your cluster as fast as possible with no issues +follow these steps: + +>Note that this method is only suggested for non-production clusters, and quick demos. See the next section for the proper way to use Codefresh in production environments. + +First make sure that you are giving commands to the appropriate cluster if you have more than one: + +`Choose cluster` +{% highlight shell %} +{% raw %} +kubectl config use-context +{% endraw %} +{% endhighlight %} + +Then give full admin privileges to the default account. 
+ +`Make default account cluster administrator` +{% highlight shell %} +{% raw %} +kubectl create clusterrolebinding default-admin --clusterrole cluster-admin --serviceaccount=default:default -n default +{% endraw %} +{% endhighlight %} + +Finally run the following commands and copy-paste the result to each Codefresh field in the UI: + +`Host IP` +{% highlight shell %} +{% raw %} +export CURRENT_CONTEXT=$(kubectl config current-context) && export CURRENT_CLUSTER=$(kubectl config view -o go-template="{{\$curr_context := \"$CURRENT_CONTEXT\" }}{{range .contexts}}{{if eq .name \$curr_context}}{{.context.cluster}}{{end}}{{end}}") && echo $(kubectl config view -o go-template="{{\$cluster_context := \"$CURRENT_CLUSTER\"}}{{range .clusters}}{{if eq .name \$cluster_context}}{{.cluster.server}}{{end}}{{end}}") +{% endraw %} +{% endhighlight %} + +`Certificate` +{% highlight shell %} +{% raw %} +echo $(kubectl get secret -o go-template='{{index .data "ca.crt" }}' $(kubectl get sa default -o go-template="{{range .secrets}}{{.name}}{{end}}")) +{% endraw %} +{% endhighlight %} + +`Token` +{% highlight shell %} +{% raw %} +echo $(kubectl get secret -o go-template='{{index .data "token" }}' $(kubectl get sa default -o go-template="{{range .secrets}}{{.name}}{{end}}")) +{% endraw %} +{% endhighlight %} + +Once the cluster been added successfully you can go to the `Kubernetes` tab to start working with the services of your cluster. + +#### The proper/secure way + +For production environments you should create a service account and/or role for Codefresh access. +The minimum permissions Codefresh needs to work with the cluster are the following: + +`codefresh-role.yml` +{% highlight yaml %} +{% raw %} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: codefresh-role +rules: + - apiGroups: [""] + resources: ["*"] + verbs: ["list", "watch", "get"] +{% endraw %} +{% endhighlight %} + +Note that these permissions will only allow Codefresh to read the cluster resources and populate the respective dashboards. You need to give more privileges for actual deployments. For more information see the [Kubernetes RBAC documentation page](https://kubernetes.io/docs/reference/access-authn-authz/rbac/){:target="\_blank"}. + +Here is an example with role + service account + binding. 
+
+`codefresh-role-sa-bind.yml`
+{% highlight yaml %}
+{% raw %}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: codefresh-role
+rules:
+  - apiGroups: [ "*"]
+    resources: ["*"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: codefresh-user
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: codefresh-user
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: codefresh-role
+subjects:
+- kind: ServiceAccount
+  name: codefresh-user
+  namespace: kube-system
+{% endraw %}
+{% endhighlight %}
+
+Select the appropriate cluster if you have more than one:
+
+`Choose cluster`
+{% highlight shell %}
+{% raw %}
+kubectl config use-context <my-cluster-name>
+{% endraw %}
+{% endhighlight %}
+
+Create the Codefresh user/role:
+
+`Apply Codefresh access rules`
+{% highlight shell %}
+{% raw %}
+kubectl apply -f codefresh-role-sa-bind.yml
+{% endraw %}
+{% endhighlight %}
+
+Finally run the following commands and copy-paste the result to each Codefresh field in the UI:
+
+`Host IP`
+{% highlight shell %}
+{% raw %}
+export CURRENT_CONTEXT=$(kubectl config current-context) && export CURRENT_CLUSTER=$(kubectl config view -o go-template="{{\$curr_context := \"$CURRENT_CONTEXT\" }}{{range .contexts}}{{if eq .name \$curr_context}}{{.context.cluster}}{{end}}{{end}}") && echo $(kubectl config view -o go-template="{{\$cluster_context := \"$CURRENT_CLUSTER\"}}{{range .clusters}}{{if eq .name \$cluster_context}}{{.cluster.server}}{{end}}{{end}}")
+{% endraw %}
+{% endhighlight %}
+
+`Certificate`
+{% highlight shell %}
+{% raw %}
+echo $(kubectl get secret -n kube-system -o go-template='{{index .data "ca.crt" }}' $(kubectl get sa codefresh-user -n kube-system -o go-template="{{range .secrets}}{{.name}}{{end}}"))
+{% endraw %}
+{% endhighlight %}
+
+`Token`
+{% highlight shell %}
+{% raw %}
+echo $(kubectl get secret -n kube-system -o go-template='{{index .data "token" }}' $(kubectl get sa codefresh-user -n kube-system -o go-template="{{range .secrets}}{{.name}}{{end}}"))
+{% endraw %}
+{% endhighlight %}
+
+#### The proper/secure way for Kubernetes Cluster 1.24+
+
+For production environments, create a service account and/or role for Codefresh access.
+
+Codefresh needs these minimum permissions to work with the cluster:
+
+`codefresh-role.yml`
+{% highlight yaml %}
+{% raw %}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: codefresh-role
+rules:
+  - apiGroups: [""]
+    resources: ["*"]
+    verbs: ["list", "watch", "get"]
+{% endraw %}
+{% endhighlight %}
+
+Note that these permissions will only allow Codefresh to read the cluster resources and populate the respective dashboards. You need to give more privileges for actual deployments. For more information see the [Kubernetes RBAC documentation page](https://kubernetes.io/docs/reference/access-authn-authz/rbac/){:target="\_blank"}.
+
+Here is an example with role + service account + binding.
+
+`codefresh-role-sa-bind.yml`
+{% highlight yaml %}
+{% raw %}
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: codefresh-role
+rules:
+  - apiGroups: ["*"]
+    resources: ["*"]
+    verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: codefresh-user
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: codefresh-user
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: codefresh-role
+subjects:
+- kind: ServiceAccount
+  name: codefresh-user
+  namespace: kube-system
+---
+apiVersion: v1
+kind: Secret
+type: kubernetes.io/service-account-token
+metadata:
+  name: codefresh-user-token
+  namespace: kube-system
+  annotations:
+    kubernetes.io/service-account.name: "codefresh-user"
+{% endraw %}
+{% endhighlight %}
            + +1. Select the appropriate cluster if you have more than one: +`Choose cluster` +{% highlight shell %} +{% raw %} +kubectl config use-context +{% endraw %} +{% endhighlight %} + +1. Create the Codefresh user/role: + +`Apply Codefresh access rules` +{% highlight shell %} +{% raw %} +kubectl apply -f codefresh-role-sa-bind.yml +{% endraw %} +{% endhighlight %} + +1. Finally run the following commands, and copy-paste the results to the respective Codefresh field in the UI: + +`Host IP` +{% highlight shell %} +{% raw %} +export CURRENT_CONTEXT=$(kubectl config current-context) && export CURRENT_CLUSTER=$(kubectl config view -o go-template=“{{\$curr_context := \”$CURRENT_CONTEXT\” }}{{range .contexts}}{{if eq .name \$curr_context}}{{.context.cluster}}{{end}}{{end}}”) && echo $(kubectl config view -o go-template=“{{\$cluster_context := \”$CURRENT_CLUSTER\”}}{{range .clusters}}{{if eq .name \$cluster_context}}{{.cluster.server}}{{end}}{{end}}”) +{% endraw %} +{% endhighlight %} + +`Certificate` +{% highlight shell %} +{% raw %} +echo $(kubectl get secret -n kube-system -o go-template=‘{{index .data “ca.crt” }}’ codefresh-user-token) +{% endraw %} +{% endhighlight %} + +`Token` +{% highlight shell %} +{% raw %} +echo $(kubectl get secret -n kube-system -o go-template=‘{{index .data “token” }}’ codefresh-user-token) +{% endraw %} +{% endhighlight %} + +#### Restrict Codefresh access to a specific namespace + +In most cases, you want to allow Codefresh to access all namespaces inside the cluster. This is the most convenient option as it will make +the [services dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) (and other GUI dashboards) the central way to manage your clusters. + +You can also restrict Codefresh only to an specific namespace of your choosing. To achieve this, use the details of service account in the previous section that has access only to that specific namespace, and also fill the *namespace* field in the cluster details form. + +{% include image.html + lightbox="true" + file="/images/integrations/kubernetes/add-cluster/restrict-namespace.png" + url="/images/integrations/kubernetes/add-cluster/restrict-namespace.png" + alt="Allows Codefresh access to a single namespace only" + caption="Allows Codefresh access to a single namespace only" + max-width="80%" + %} + +Notice that if you follow this approach several built-in Codefresh capabilities will be disabled (e.g. creating new namespaces from the GUI). + + + +### Adding a Rancher cluster + +Rancher clusters are currently supported as generic clusters. Rancher clusters have a specific authentication configuration (the details are here: [https://rancher.com/kubernetes-authentication-in-rancher-and-rbac](https://rancher.com/kubernetes-authentication-in-rancher-and-rbac){:target="\_blank"} for Rancher 1.x and at [https://rancher.com/blog/2018/2018-05-04-authentication-authorization-rancher2/](https://rancher.com/blog/2018/2018-05-04-authentication-authorization-rancher2/){:target="\_blank"} for Rancher 2.x). + +Authentication using a token of a Kubernetes Service Account, which is usually used by Codefresh, doesn't work with Rancher clusters. Also, Rancher doesn't do proper TLS termination out-of-the-box for Kubernetes clusters hosted on it, so one needs to configure a load balancer for that purpose. + +In summary, the following conditions should be met in order to add the cluster, hosted on Rancher to Codefresh: + +#### For Rancher version 1.x + +1. 
The token should be taken from the kubeconfig provided by Rancher and it has to be encoded with base64 before putting it into Codefresh. Be careful with the '\n' characters when encoding. The command for Linux is: `echo | tr -d '\n' | base64 | tr -d '\n'`. +1. The CA certificate should be the CA of the Load Balancer standing in front of Rancher. +1. The hostname and port should be corresponding to your Load Balancer. + +{% include image.html + lightbox="true" + file="/images/integrations/kubernetes/add-cluster/rancher-token.png" + url="/images/integrations/kubernetes/add-cluster/rancher-token.png" + alt="Getting the Rancher token" + caption="Getting the Rancher token" + max-width="40%" + %} + +#### For Rancher version 2.x + +1. Kubernetes HOST is in the kubeconfig provided by Rancher for the Kubernetes cluster based on the domain name of Rancher + the Kubernetes cluster endpoint exposed through Rancher in cluster -> server. Example: `https://rancher.localhost/k8s/clusters/c-npft4`. +1. The token should be taken from the kubeconfig provided by Rancher under user -> token section of YAML and it has to be encoded with base64 before putting it into Codefresh. Be careful with the '\n' characters when encoding, do not wrap token in quotes when running echo command. The command for Linux is: `echo | tr -d '\n' | base64 | tr -d '\n'` Example: `kubeconfig-user-xtnt4:cppxv6db…`. +1. The CA certificate should be the CA of the Load Balancer standing in front of Rancher base64 encoded `openssl base64 -in cert -out b64`. And then run this command on the file to remove any white space. `cat b64 | tr -d '\040\011\012\015' > b64_cert` then copy and paste this base64 encoded value into Codefresh UI Cert field. +1. The hostname and port should be corresponding to your Load Balancer. + +{% include image.html + lightbox="true" + file="/images/integrations/kubernetes/add-cluster/rancher-2.png" + url="/images/integrations/kubernetes/add-cluster/rancher-2.png" + alt="Rancher 2.x cluster details" + caption="Rancher 2.x cluster details" + max-width="40%" + %} + + +Once you have all the information follow the instructions for adding a generic Kubernetes cluster in Codefresh as described in the previous section. + + +### Troubleshooting cluster addition + +After adding your cluster configurations and in case the test fails, click "Save" to get the error message back. + +{% include image.html + lightbox="true" + file="/images/integrations/kubernetes/add-cluster/click-save-error-message.png" + url="/images/integrations/kubernetes/add-cluster/click-save-error-message.png" + alt="Get error message for troubleshooting" + caption="Get error message for troubleshooting" + max-width="40%" + %} + +{:.text-secondary} +#### Error: Cannot list namespaces + + `Add Cluster Error` +{% highlight shell %} +{% raw %} +Failed to add cluster: namespaces is forbidden: User "system:serviceaccount:default:default" cannot list namespaces at the cluster scope +{% endraw %} +{% endhighlight %} + +The service account used for the integration doesn't have the minimal permissions required. To fix this add a service account that have the required permissions. 
The easiest way to do this is to create a cluster role binding between the default service account and the cluster-admin role:
+
+ `Create cluster binding with admin permissions`
+{% highlight shell %}
+{% raw %}
+kubectl create clusterrolebinding default-admin --clusterrole cluster-admin --serviceaccount=default:default
+{% endraw %}
+{% endhighlight %}
+
+### Kubernetes cluster - using an external reverse proxy (edge case)
+
+In case you're using an external reverse proxy to manage inbound traffic to your Kubernetes API, please read [this article]({{site.baseurl}}/docs/deploy-to-kubernetes/verify-cluster-tls-ssl-configuration/) to make sure your certificates are set up correctly, so that you can add your cluster successfully to Codefresh.
+
+### Multiple CAs in certificate chain
+
+Ideally your Kubernetes cluster will have a single certificate which is used directly on the API endpoint. Some organizations
+place clusters behind a load balancer or other proxy mechanism that uses a chain of certificates.
+
+When that happens and you have more than one [CA](https://en.wikipedia.org/wiki/Certificate_authority) in your certificate chain, you need to provide Codefresh with a [certificate bundle](https://en.wikipedia.org/wiki/Chain_of_trust) (a file that contains the intermediate CAs as well).
+
+You will know this is the case, as the following error appears when you try to connect your cluster:
+
+```
+{"status":400,"code":"1004","name":"BAD_REQUEST_ERROR","message":"Failed to add cluster: unable to get local issuer certificate","context":{}}
+```
+
+To get the whole chain, open the URL of your Kubernetes API endpoint in Chrome or Firefox and export all individual certificates as files.
+
+{% include image.html
+  lightbox="true"
+  file="/images/kubernetes/add-cluster/cert-hierarchy.png"
+  url="/images/kubernetes/add-cluster/cert-hierarchy.png"
+  alt="A Certificate chain"
+  caption="A Certificate chain"
+  max-width="60%"
+  %}
+
+The steps needed are:
+
+1. Concatenate all certificates (apart from the API/endpoint one) into a bundle:
+`cat rootCA.crt intermediateCA.crt > ca_bundle_cert`.
+1. Run the following to check the validity of the certificate:
+`openssl verify -verbose -CAfile ca_bundle_cert k8_server_cert`.
+1. If the check above passes fine, go on and run the following on your CA bundle file:
+`base64 ca_bundle_cert | tr -d '\n'`.
+1. Copy the output string (be careful when copying) and check whether you have copied it correctly:
+`openssl x509 -text -in <(echo <base64-string> | base64 -d)` - you should see the contents of your CA bundle file.
+1. Put the copied string into the Codefresh Kubernetes integration form and test the connection.
+
+Please make sure the certificates are in the order Root -> Intermediate -> Server.
+
+Once you connect a cluster, it gets a unique name inside your account, which is important when using this cluster inside a pipeline. From the same screen you can also connect [internal clusters that are behind your firewall]({{site.baseurl}}/docs/reference/behind-the-firewall/#deploying-to-an-internal-kubernetes-cluster/).
+
+## Viewing the Codefresh cluster dashboard
+
+After you connect a cluster, several graphical dashboards are automatically populated. The first one is the [Codefresh Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/).
+
+ +{% + include image.html + lightbox="true" +file="/images/integrations/kubernetes/kubernetes-dashboard.png" +url="/images/integrations/kubernetes/kubernetes-dashboard.png" +alt="Integrated Kubernetes Dashboard" +caption="Integrated Kubernetes Dashboard" +max-width="100%" +%} + +You can use this Dashboard to get basic information for your cluster such as services, pods, deployments etc. + +{% + include image.html + lightbox="true" +file="/images/integrations/kubernetes/change-kubernetes-manifest.png" +url="/images/integrations/kubernetes/change-kubernetes-manifest.png" +alt="Changing a Kubernetes Manifest" +caption="Changing a Kubernetes Manifest" +max-width="100%" +%} + +From the same dashboard you can also add/change configmaps and even edit directly the manifest of a resource. + + + +## Viewing the environment dashboard + +The second dashboard that is enabled after you connect a cluster (but not automatically populated), is the [environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/). + +{% include +image.html +lightbox="true" +file="/images/integrations/kubernetes/environments.png" +url="/images/integrations/kubernetes/environments.png" +alt="Codefresh Environment Dashboard" +caption="Codefresh Environment Dashboard" +max-width="100%" +%} + +This dashboard shows a live view of a Kubernetes application along with the status of the latest builds that affected this environment. You can define such environments either directly from the GUI or [programmatically in a pipeline]({{site.baseurl}}/docs/pipelines/deployment-environments/). + +## Ad-hoc deployments with the Codefresh UI + +One of the [easiest ways to deploy to Kubernetes]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) is to use the Codefresh UI and [manually deploy a docker image]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#deploying-a-new-service): + +{% include image.html +lightbox="true" +file="/images/integrations/kubernetes/deploy-with-ui.png" +url="/images/integrations/kubernetes/deploy-with-ui.png" +alt="Deploying with the quick UI dialog" +caption="Deploying with the quick UI dialog" +max-width="80%" +%} + +You can also [create a pull Secret]({{site.baseurl}}/docs/deployments/kubernetes/access-docker-registry-from-kubernetes/) from the GUI. + + +## Automated deployments with Codefresh pipelines + +You can also deploy to a cluster in a pipeline. Codefresh offers [several ways for Kubernetes deployments]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/). The important point here is that all connected clusters are automatically available to all pipelines with their unique name as a `kubectl` context. + + {% include +image.html +lightbox="true" +file="/images/getting-started/quick-start-k8s/deployment-build.png" +url="/images/getting-started/quick-start-k8s/deployment-build.png" +alt="Kubernetes deployment in a pipeline" +caption="Kubernetes deployment in a pipeline" +max-width="100%" +%} + + +You can use the [integrated Codefresh deployment methods]({{site.baseurl}}/docs/pipelines/steps/deploy/) or even run [custom kubectl commands directly on your cluster]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/). + +Codefresh also offers a simple solution for [templating]({{site.baseurl}}/docs/deployments/kubernetes/kubernetes-templating/) but you can use another templating methods such as [kustomize]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-with-kustomize/). 
+ + + +## Using a cluster for running CI/CD pipelines + +Finally you can also use the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) to run pipelines in your cluster. + + + +## Related articles +[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Cloning Git repositories]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) + diff --git a/_docs/integrations/microsoft-azure.md b/_docs/integrations/microsoft-azure.md new file mode 100644 index 000000000..25c6aa4dc --- /dev/null +++ b/_docs/integrations/microsoft-azure.md @@ -0,0 +1,135 @@ +--- +title: "Microsoft Azure CI integration" +description: "How to use Codefresh with Azure" +group: integrations +redirect_from: + - /docs/microsoft-azure/ + - /docs/deploy-your-containers/microsoft-azure/ +toc: true +--- + +Codefresh has native support for Azure in the following areas: + +- [Integration with Azure Git]({{site.baseurl}}/docs/integrations/git-providers/#azure-devops) +- [Connecting to Azure registries]({{site.baseurl}}/docs/docker-registries/azure-docker-registry/) +- [Deploying to AKS]({{site.baseurl}}/docs/deployments/kubernetes/#adding-aks-cluster) +- [Using Azure Storage for Test reports]({{site.baseurl}}/docs/testing/test-reports/#connecting-azure-storage) +- [Using Azure Storage for Helm charts]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/#private-repository---azure) +- [Azure SSO]({{site.baseurl}}/docs/administration/single-sign-on/sso-azure/) + +## Using Azure Git repositories + +Codefresh can easily check out code from Azure Git repositories: + +{% include +image.html +lightbox="true" +file="/images/integrations/azure/azure-git-integration.png" +url="/images/integrations/azure/azure-git-integration.png" +alt="Azure Git integration" +caption="Azure Git integration" +max-width="70%" +%} + +For more details see the [documentation page]({{site.baseurl}}/docs/integrations/git-providers/#azure-devops). Once your repository is connected, you can use the [native clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) as well as [Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) like all other git providers. + +## Using Azure Docker registries + +Azure Docker registries are fully compliant with the Docker registry API that Codefresh follows. You can connect an Azure Registry like any [other Docker registry]({{site.baseurl}}/docs/docker-registries/). + +{% + include image.html + lightbox="true" +file="/images/integrations/docker-registries/add-azure-registry.png" +url="/images/integrations/docker-registries/add-azure-registry.png" +alt="Adding the Azure Docker registry" +caption="Adding the Azure Docker registry" +max-width="70%" +%} + +Once the registry is added you can the [standard push step]({{site.baseurl}}/docs/pipelines/steps/push/) step in pipelines. + +## Deploying to Azure Kubernetes + +Codefresh has native support for connecting an Azure cluster in the [cluster configuration screen]({{site.baseurl}}/docs/deployments/kubernetes/#connect-a-kubernetes-cluster). + +{% + include image.html + lightbox="true" +file="/images/integrations/azure/aks-integration.png" +url="/images/integrations/azure/aks-integration.png" +alt="Connecting an Azure cluster" +caption="Connecting an Azure cluster" +max-width="40%" +%} + +Once the cluster is connected you can use any of the [available deployment options]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) for Kubernetes clusters. 
You also get access to all other Kubernetes dashboards such as the [cluster dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) or the [environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/).
+
+## Storing test reports in Azure storage
+
+Codefresh has native support for test reports. You can store the reports on Azure storage.
+
+{% include
+image.html
+lightbox="true"
+file="/images/integrations/azure/azure-storage.png"
+url="/images/integrations/azure/azure-storage.png"
+alt="Azure cloud storage"
+caption="Azure cloud storage"
+max-width="50%"
+%}
+
+See the full documentation for [test reports]({{site.baseurl}}/docs/testing/test-reports/).
+
+## Using Azure storage for storing Helm charts
+
+You can connect Azure Storage as a Helm repository in the [integrations screen]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/).
+
+{% include
+image.html
+lightbox="true"
+file="/images/integrations/azure/azure-helm-repo.png"
+url="/images/integrations/azure/azure-helm-repo.png"
+alt="Using Azure for Helm charts"
+caption="Using Azure for Helm charts"
+max-width="80%"
+%}
+
+Once you connect your Helm repository you can use it in any [Codefresh pipeline with the Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/).
+
+## Azure Single Sign-on
+
+You can use Azure Active Directory as an [SSO mechanism]({{site.baseurl}}/docs/administration/single-sign-on/) in Codefresh.
+
+{% include
+image.html
+lightbox="true"
+file="/images/integrations/azure/azure-sso-integration.png"
+url="/images/integrations/azure/azure-sso-integration.png"
+alt="Azure SSO integration"
+caption="Azure SSO integration"
+max-width="70%"
+%}
+
+Once configuration is complete, all Codefresh users can log in with their Azure credentials instead of personal accounts.
+
+## Traditional Azure deployments
+
+For any other Azure deployment you can use the [Azure CLI from a Docker image](https://hub.docker.com/_/microsoft-azure-cli){:target="\_blank"} in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/).
+
+`YAML`
+{% highlight yaml %}
+{% raw %}
+  create_my_vm:
+    title: Creating a VM
+    image: mcr.microsoft.com/azure-cli
+    commands:
+      - az vm create --resource-group TutorialResources --name TutorialVM1 --image UbuntuLTS --generate-ssh-keys
+{% endraw %}
+{% endhighlight %}
+
+For authentication see the [Microsoft documentation page](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest){:target="\_blank"}.
+
+## Related articles
+[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/)
+[Cloning Git repositories]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/)
diff --git a/_docs/integrations/notifications.md b/_docs/integrations/notifications.md
new file mode 100644
index 000000000..21b12ae77
--- /dev/null
+++ b/_docs/integrations/notifications.md
@@ -0,0 +1,15 @@
+---
+title: "Notifications"
+description: ""
+group: integrations
+redirect_from:
+  - /docs/notifications/
+toc: true
+---
+Codefresh enables you to send notifications about events.
+ +Codefresh supports the following notification channels: +- Email +- [Slack]({{ site.baseurl }}/docs/integrations/notifications/slack-integration/) +- [Jira]({{ site.baseurl }}/docs/integrations/notifications/jira-integration/) + diff --git a/_docs/integrations/notifications/jira-integration.md b/_docs/integrations/notifications/jira-integration.md new file mode 100644 index 000000000..ee47311ce --- /dev/null +++ b/_docs/integrations/notifications/jira-integration.md @@ -0,0 +1,47 @@ +--- +title: "Jira notification integrations for piplines" +description: "" +group: integrations +redirect_from: + - /docs/jira-integration-1/ + - /docs/integrations/jira-integration-1/ +toc: true +--- +Codefresh integrates with Jira in several ways: +* Through the [Jira integration]({{site.baseurl}}/docs/integrations/jira/) for the highest visibility into your GitOps deployments +* Through a [custom step]({{site.baseurl}}/docs/integrations/notifications/jira-integration/#use-jira-within-your-codefresh-pipeline) from our step marketplace so that you can connect your pipelines with Jira +* Alternatively, through using your own [jira-cli]({{site.baseurl}}/docs/integrations/notifications/jira-integration/#using-your-own-jira-cli) + + +## Prerequisites +* [Codefresh Account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) +* [Jira Account](https://www.atlassian.com/software/jira){:target="\_blank"} + +## Use Jira in your Codefresh pipeline + +The step marketplace offers several freestyle steps that can be used in your Codefresh pipeline through steps. + +One of those steps is the [Jira Issue Manager](https://codefresh.io/steps/step/jira-issue-manager){:target="\_blank"}. +It can be used to: +* Create a Jira issue +* Comment on existing Jira issues +* Change the status of an issue, for example, once the build is successful +* Add a description to your issue +* And more + +More information is provided [directly in the example]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-jira/). + +## Using your own jira-cli + +Alternatively, you can use your own jira-cli by adding the following steps to your Dockerfile: + +{% highlight yaml %} +FROM python:2-alpine +RUN apk add -U gcc musl-dev linux-headers openssl-dev libffi-dev && pip install jira-cli +{% endhighlight %} + +And then running the Dockerfile. + +## Related articles +[Codefresh pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/) +[Create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/integrations/notifications/slack.md b/_docs/integrations/notifications/slack.md new file mode 100644 index 000000000..8300c42c7 --- /dev/null +++ b/_docs/integrations/notifications/slack.md @@ -0,0 +1,66 @@ +--- +title: "Slack" +description: "Get Slack notifications with pipeline integrations" +group: integrations +sub_group: notifications +permalink: /:collection/integrations/notifications/slack-integration/ +toc: true +excerpt: "Integrate Codefresh with Slack to get updates on development and testing progress and feedback." +--- + +You can integrate Slack globally, or for specific pipelines and builds. + +## Set up global Slack integration in Codefresh + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}. +1. Select **Slack** and then click **Configure**. +1. Select **Quick Setup**. +1. 
Select the types of notifications you want to receive on Slack. +1. Click **Add to Slack**.
            You are redirected to the Slack sign-in page. +1. Log in with your Slack credentials. +1. Select the check box to enable notifications for Slack. +1. Click **Save**. + +{% include image.html +lightbox="true" +file="/images/integrations/slack/add-slack-integration.png" +url="/images/integrations/slack/add-slack-integration.png" +alt="Add Slack integration" +max-width="50%" +%} + + +Codefresh can now post notifications to Slack, for example, notifications of successful and failed builds, and direct messages received within the Codefresh app. + +## How Slack notifications work + +When you have Slack integration enabled: + +1. All pipelines that are launched automatically by [triggers]({{site.baseurl}}/docs/pipelines/triggers/) send Slack notifications +1. All pipelines that are executed manually do **NOT** send Slack notifications. + +You can override this behavior by toggling the checkbox **Report notification on pipeline execution** under **Advanced Settings** +either in a Git trigger dialog or the Run settings of a pipeline. + +{% include image.html +lightbox="true" +file="/images/integrations/slack/report-notifications.png" +url="/images/integrations/slack/report-notifications.png" +alt="Manual Slack override" +caption="Manual Slack override" +max-width="40%" +%} + +## Individual pipeline Slack integration + +If you wish for more fine-grained control over Slack notifications, then take a look at any of the available slack plugins + +* [https://codefresh.io/steps/step/slack-message-sender](https://codefresh.io/steps/step/slack-message-sender){:target="\_blank"} +* [https://codefresh.io/steps/step/slack-notifier](https://codefresh.io/steps/step/slack-notifier){:target="\_blank"} +* [https://github.com/cloudposse/slack-notifier](https://github.com/cloudposse/slack-notifier){:target="\_blank"} + + +## Related articles +[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Monitoring pipelines]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/) diff --git a/_docs/integrations/secret-storage.md b/_docs/integrations/secret-storage.md new file mode 100644 index 000000000..a7f268fa2 --- /dev/null +++ b/_docs/integrations/secret-storage.md @@ -0,0 +1,158 @@ +--- +title: "Secret Storage" +description: "Manage Kubernetes secrets with Codefresh" +group: integrations +toc: true +--- + +Codefresh can resolve variables storing secrets from remote sources. This allows you to keep sensitive data on your cluster, and for Codefresh to request it during pipeline execution on-demand. + +Secret-Store is an additional context in Codefresh. Codefresh supports two types of secret storage: +* Kubernetes secrets for SaaS versions +* Runtime-Kubernetes for hybrid deployments with Codefresh Runner + +You can set up both types either in the Codefresh UI or via the CLI (`codefresh create context secret-store --help`). + +> This feature is for Enterprise accounts only. + +## Kubernetes secret store setup +Kubernetes the native secrets supported by a cluster. + +### Prerequisites + +* For the Kubernetes secret store, [connect your Kubernetes cluster to Codefresh]({{site.baseurl}}/docs/integrations/kubernetes/connect-a-kubernetes-cluster/). 
+
+* Create a Kubernetes secret, as described below.
+
+
+### Create a Kubernetes Secret
+
+Create your secret in Kubernetes:
+
+```
+kubectl create secret generic my-secret --from-literal=key1=supersecret
+```
+
+```
+kubectl create configmap my-config-map --from-literal=key1=config1
+```
+
+
+### Set up Kubernetes secret integration in Codefresh UI
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select **Secret Store** and then click **Configure**.
+1. From the **Add Provider** dropdown, select **Kubernetes**.
+1. Do the following:
+ * **Name**: A unique name given to your context, which will be referenced in `codefresh.yaml`.
+ * **Resource Type**: Select **Secret**; the data is base64 decoded during resolution.
+ * **Cluster**: The name of the cluster as it is configured in Codefresh.
+ * **Namespace**: The namespace where the secret is stored.
+ * **Resource Name**: Optional. The name of the secret.
+ * To allow all users in the account access to the secret, enable **Allow access to all users**.
+
+
+
+{% include
+image.html
+lightbox="true"
+file="/images/integrations/secret-storage/secrets-ui-view.png"
+url="/images/integrations/secret-storage/secrets-ui-view.png"
+alt="Kubernetes Secret Store"
+caption="Kubernetes Secret Store"
+max-width="80%"
+%}
+
+{:start="5"}
+1. To apply the changes, click **Save**.
+
+
+### Set up Kubernetes secret integration via Codefresh CLI
+
+* To create a secret store context for **Kubernetes**, run:
+```
+codefresh create context secret-store kubernetes "$NAME_IN_CODEFRESH" --cluster "$CLUSTER" --namespace "$NAMESPACE" --resource-type "$TYPE" --resource-name "$NAME"
+```
+or, for our example:
+
+```
+codefresh create context secret-store kubernetes "test" --cluster "anna-demo@FirstKubernetes" --namespace "default" --resource-type secret --resource-name "my-secret"
+```
+
+where:
+
+- `$NAME_IN_CODEFRESH` is a unique name given to your context, which will be referenced in `codefresh.yaml` later.
+- `$CLUSTER` is the name of the cluster as it is configured in Codefresh.
+- `$NAMESPACE` is the Kubernetes namespace where the secret is stored.
+- `$TYPE` is either `secret` or `configmap`
+  - if `secret`, data will be base64 decoded during resolution
+  - if `configmap`, data will be replaced as is
+- `$RESOURCE_NAME` is optional and is the name of the secret
+
+## Runtime secret store setup for Codefresh Runner installation
+
+
+For [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) installations, you can also store secrets in your own runtime.
+
+### Set up runtime secret store in Codefresh UI
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select **Secret Store** and then click **Configure**.
+1. From the **Add Provider** dropdown, select **Runtime secret**.
+1. Do the following:
+ * **Name**: A unique name given to your context, which will be referenced in `codefresh.yaml`.
+ * **Resource Name**: The name of the secret.
+ * **Resource Type**: Select the type of secret:
+   - if `secret`, data will be base64 decoded during resolution
+   - if `configmap`, data will be replaced as is
+ * **Runtime Environment**: Select the runtime environment with the secret.
+ * To allow all users in the account access to the secret, enable **Allow access to all users**. + +{% include +image.html +lightbox="true" +file="/images/integrations/secret-storage/secrets-ui-view2.png" +url="/images/integrations/secret-storage/secrets-ui-view2.png" +alt="Runtime Secret Store" +caption="Runtime Secret Store" +max-width="80%" +%} + +{:start="5"} + +1. To apply the changes, click **Save**. + + + +### Set up runtime secret store with Codefresh CLI + +To create a secret store context for **Runtime-Kubernetes** environments ([behind the firewall]({{site.baseurl}}/docs/reference/codefresh-runner/)), run: + +``` +codefresh create context secret-store kubernetes-runtime "$NAME_IN_CODEFRESH" --runtime "$RUNTIME_NAME" --resource-type "$TYPE" --resource-name ”$NAME” +``` + +or, for our example: + +``` +codefresh create context secret-store kubernetes-runtime "test" --runtime "gke_firstkubernetes-176201_us-central1-a_anna-demo" --resource-type secret --resource-name "my-secret" +``` + +where: + +- `$NAME_IN_CODEFRESH` is a unique name given to your context, which will be referenced in `codefresh.yaml` later. +- `$CLUSTER` is the name of the cluster as it is configured in Codefresh +- `$NAMESPACE` is the Kubernetes namespace +- `$TYPE` is of either `secret` or `configmap` + - if `secret`, data will be base64 decoded during resolution + - if `configmap`, data will be replaced as is +- `$RESOURCE_NAME` is the name of the secret (optional) +- `$RUNTIME_NAME` is the name of the run-time environment to be configured as secret store. If not set, *any* runtime-environment will be considered. + +## Using the secrets + +Once Codefresh is linked to your secrets, you can use them either in pipelines or any relevant section in the Codefresh UI. For details, see [Using secrets]({{site.baseurl}}/docs/pipelines/secrets-store/) for the details. + +## Related articles +[Shared Configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) +[Git integration for pipelines]({{site.baseurl}}/docs/integrations/git-providers/) +[Kubernetes integration for pipelines]({{site.baseurl}}/docs/integrations/kubernetes/) +[Container registry integration for pipelines]({{site.baseurl}}/docs/integrations/docker-registries/) diff --git a/_docs/migration/gitops-dashboard.md b/_docs/migration/gitops-dashboard.md deleted file mode 100644 index 17429bace..000000000 --- a/_docs/migration/gitops-dashboard.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "GitOps dashboard" -description: "" -group: migration -toc: true ---- - -Coming soon diff --git a/_docs/migration/pipelines.md b/_docs/migration/pipelines.md deleted file mode 100644 index 176f096c8..000000000 --- a/_docs/migration/pipelines.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Pipelines" -description: "" -group: migration -toc: true ---- - -Coming soon diff --git a/_docs/pipelines/advanced-workflows.md b/_docs/pipelines/advanced-workflows.md new file mode 100644 index 000000000..8bb797dcd --- /dev/null +++ b/_docs/pipelines/advanced-workflows.md @@ -0,0 +1,972 @@ +--- +title: "Advanced workflows with parallel steps" +description: "Create complex workflows in Codefresh with step dependencies" +group: pipelines +toc: true +--- + +Codefresh is very flexible when it comes to pipeline complexity and depth. 
+ +You can easily create: + * Sequential pipelines where step order is the same as the listing order in YAML (simple) + * Sequential pipelines that have some parallel parts (intermediate) + * Parallel pipelines where step order is explicitly defined (advanced) + +With the parallel execution mode, you can define complex pipelines with fan-in/out configurations capable of matching even the most complicated workflows within an organization. + +>In Codefresh, parallel execution is unrelated to [stages]({{site.baseurl}}/docs/codefresh-yaml/stages/). Stages are only a way to visually organize your pipeline steps. The actual execution is independent from the visual layout in the logs view. + +Before going any further make sure that you are familiar with the [basics of Codefresh pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/). + +Codefresh offers two modes of execution: + +1. Sequential mode (which is the default) +1. Parallel mode + +## Sequential execution mode + +The sequential mode is very easy to understand and visualize. + +In sequential mode, the Codefresh execution engine starts from the first step defined at the top of the `codefresh.yml` file, and executes all steps one by one going down to the end of the file. A step is either executed or skipped according to its conditions. + +>The condition for each step is only examined **once**. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +mode: sequential +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: sample-python-image + working_directory: ./ + tag: ${{CF_BRANCH_TAG_NORMALIZED}} + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +Here we have two steps, one that creates a Docker image and a second one that runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) inside it. The order of execution is identical to the order of the steps in the YAML file. This means that unit tests will always run after the Docker image creation. + +Notice that the line `mode: sequential` is shown only for illustration purposes. Sequential mode is the default, and therefore this line can be omitted. + + +## Inserting parallel steps in a sequential pipeline + +You don't have to activate parallel execution mode for the whole pipeline if only a part of it needs to run in parallel. Codefresh allows you insert a parallel phase inside a sequential pipeline with the following syntax: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_task1: + title: My Task 1 + [...] + my_parallel_tasks: + type: parallel + steps: + my_task2a: + title: My Task 2A + [...] + my_task2b: + title: My Task 2B + [...] + my_task3: + title: My Task3 + [...] +{% endraw %} +{% endhighlight %} + + +In this case tasks 2A and 2B will run in parallel. +The step name that defines the parallel phase (`my_parallel_tasks` in the example above), is completely arbitrary. + +The final order of execution will be + +1. Task 1 +1. Task 2A and Task2B at the same time +1. Task 3 + +This is the recommended way to start using parallelism in your Codefresh pipelines. It is sufficient for most scenarios that require parallelism. + +>The step names must be unique within the same pipeline. The parent and child steps should NOT share the same name. 
+ +### Example: pushing multiple Docker images in parallel + +Let's see an example where a Docker image is created and then we push it to more than one registry. This is a perfect candidate for parallelization. Here is the `codefresh.yml`: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- build +- push +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'build' + type: build + image_name: trivialgoweb + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + PushingToRegistries: + type: parallel + stage: 'push' + steps: + jfrog_PushingTo_jfrog_BintrayRegistry: + type: push + title: jfrog_Pushing To Bintray Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: bintray + PushingToGoogleRegistry: + type: push + title: Pushing To Google Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: gcr + PushingToDockerRegistry: + type: push + title: Pushing To Dockerhub Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + image_name: kkapelon/trivialgoweb + registry: dockerhub +{% endraw %} +{% endhighlight %} + +The order of execution is the following: + +1. MyAppDockerImage ([build step]({{site.baseurl}}/docs/pipelines/steps/build/)) +1. jfrog_PushingTo_jfrog_BintrayRegistry, PushingToGoogleRegistry, PushingToDockerRegistry ([push steps]({{site.baseurl}}/docs/pipelines/steps/push/)) + +The pipeline view for this yaml file is the following. + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/parallel-push.png" +url="/images/codefresh-yaml/parallel-push.png" +alt="Parallel Docker push" +caption="Parallel Docker push" +max-width="80%" +%} + +As you can see we have also marked the steps with [stages]({{site.baseurl}}/docs/pipelines/stages/) so that we get a visualization that matches the execution. + + +### Example: Running multiple test suites in parallel + +All types of steps can by placed inside a parallel phase. Another common use case would be the parallel execution of [freestyle steps]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) for unit/integration tests. + +Let's say that you have a Docker image with a Python back-end and a JavaScript front-end. You could run both types of tests in parallel with the following yaml syntax: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-full-stack-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + MyTestingPhases: + type: parallel + steps: + my_back_end_tests: + title: Running Back end tests + image: ${{MyAppDockerImage}} + commands: + - python setup.py test + my_front_end_tests: + title: Running Front End tests + image: ${{MyAppDockerImage}} + commands: + - npm run test +{% endraw %} +{% endhighlight %} + +Running different types of tests (unit/integration/load/acceptance) in parallel is a very common use case for parallelism inside an otherwise sequential pipeline. + +### Defining success criteria for a parallel step + +By default, any failed step in a Codefresh pipeline will fail the whole pipeline. There are ways to change this behavior (the `fail_fast` property is explained later in this page), but specifically for parallel steps you can define exactly when the whole step succeeds or fails. 
+ +You can define steps that will be used to decide if a parallel step succeeds with this syntax: + +{% highlight yaml %} +second_step: + title: Second step + success_criteria: + steps: + only: + - my_unit_tests + type: parallel + steps: + my_unit_tests: + title: Running Back end tests + image: node + commands: + - npm run test + my_integration_tests: + title: Running Integration tests + image: node + commands: + - npm run int-test + my_acceptance_tests: + title: Running Acceptance tests + image: node + commands: + - npm run acceptance-test +{% endhighlight %} + +In the example above, if integration and/or acceptance tests fail, the whole pipeline will continue, because we have defined that only the results of unit test matter for the whole parallel step. + +The reverse relationship (i.e., defining steps to be ignored) can be defined with the following syntax + +{% highlight yaml %} +second_step: + title: Second step + success_criteria: + steps: + ignore: + - my_integration_tests + - my_acceptance_tests + type: parallel + steps: + my_unit_tests: + title: Running Back end tests + image: node + commands: + - npm run test + my_integration_tests: + title: Running Integration tests + image: node + commands: + - npm run int-test + my_acceptance_tests: + title: Running Acceptance tests + image: node + commands: + - npm run acceptance-test +{% endhighlight %} + +In the example above we have explicitly defined that even if the integration or acceptance tests fail the whole pipeline will continue. + +### Shared Codefresh volume and race conditions + +In any pipeline step, Codefresh automatically attaches a [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) that is used to transfer artifacts between steps. The same volume is also shared between steps that run in parallel. + + +Here is an example where two parallel steps are writing two files. After they finish execution, we list the contents of the project folder. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + WritingInParallel: + type: parallel + steps: + writing_file_1: + title: Step1A + image: alpine + commands: + - echo "Step1A" > first.txt + writing_file_2: + title: Step1B + image: alpine + commands: + - echo "Step1B" > second.txt + MyListing: + title: Listing of files + image: alpine + commands: + - ls +{% endraw %} +{% endhighlight %} + +The results from the `MyListing` step is the following: + +``` +first.txt second.txt +``` + +This illustrates the side effects for both parallel steps that were executed on the same volume. + +>It is therefore your responsibility to make sure that steps that run in parallel play nice with each other. Currently, Codefresh performs no conflict detection at all. If there are race conditions between your parallel steps, (e.g. multiple steps writing at the same files), the final behavior is undefined. It is best to start with a fully sequential pipeline, and use parallelism in a gradual manner if you are unsure about the side effects of your steps + +## Implicit parallel steps +> If you use implicit parallel steps, you _cannot_ use _parallel pipeline mode_. + +In all the previous examples, all parallel steps have been defined explicitly in a pipeline. This works well for a small number of steps, but in some cases it can be cumbersome to write such a pipeline, especially when the parallel steps are similar. 
+ +Codefresh offers two handy ways to lessen the amount of YAML you have to write and get automatic parallelization with minimum effort. + +* The `scale` syntax allows you to quickly create parallel steps that are mostly similar (but still differ) +* The `matrix` syntax allows you to quickly create parallel steps for multiple combinations of properties + +### Scale parallel steps (one dimension) + +If you look back at the parallel docker push example you will see that all push steps are the same. The only thing that changes is the registry that they push to. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- build +- push +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'build' + type: build + image_name: trivialgoweb + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + PushingToRegistries: + type: parallel + stage: 'push' + steps: + jfrog_PushingTo_jfrog_BintrayRegistry: + type: push + title: jfrog_Pushing To Bintray Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: bintray + PushingToGoogleRegistry: + type: push + title: Pushing To Google Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: gcr + PushingToDockerRegistry: + type: push + title: Pushing To Dockerhub Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + image_name: kkapelon/trivialgoweb + registry: dockerhub +{% endraw %} +{% endhighlight %} + + +This pipeline can be simplified by using the special `scale` syntax to create a common parent step with all similarities: + + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- build +- push +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'build' + type: build + image_name: trivialgoweb + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + PushingToRegistries: + stage: 'push' + type: push + tag: '${{CF_SHORT_REVISION}}' + candidate: ${{MyAppDockerImage}} + scale: + jfrog_PushingTo_jfrog_BintrayRegistry: + registry: bintray + PushingToGoogleRegistry: + registry: gcr + PushingToDockerRegistry: + image_name: kkapelon/trivialgoweb + registry: dockerhub +{% endraw %} +{% endhighlight %} + +You can see now that all common properties are defined once in the parent step (`PushingToRegistries`) while each push step only contains what differs. Codefresh will automatically create parallel steps when it encounters the `scale` syntax. + +The resulting pipeline is more concise but runs in the same manner as the original YAML. For a big number of parallel steps, the `scale` syntax is very helpful for making the pipeline definition more clear. + +You can use the `scale` syntax with all kinds of steps in Codefresh and not just push steps. Another classic example would be running tests in parallel with different environment variables. + + +`YAML` +{% highlight yaml %} +{% raw %} + run_tests_in_parallel: + stage: 'Microservice A' + working_directory: './my-front-end-code' + image: node:latest + commands: + - npm run test + scale: + first: + environment: + - TEST_NODE=0 + second: + environment: + - TEST_NODE=1 + third: + environment: + - TEST_NODE=2 + fourth: + environment: + - TEST_NODE=3 +{% endraw %} +{% endhighlight %} + +This pipeline will automatically create 4 parallel freestyle steps. 
All of them will use the same Docker image and executed the same command (`npm run test`) but each one will receive a different value for the environment variable called `TEST_NODE`. + +Notice that if you define environment variables on the parent step (`run_tests_in_parallel` in the example above), they will also be available on the children parallel steps. And if those define, environment variables as well, all environment variables will be available. + + +### Matrix parallel steps (multiple dimensions) + +The `scale` syntax allows you to easily create multiple parallel steps that differ only in a single dimension. If you have multiple dimensions of properties that differ and you want to run all possible combinations (Cartesian product) then the `matrix` syntax will do that for you automatically. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-example-unit-test' + revision: 'master' + git: github + stage: prepare + run_my_tests_before_build: + stage: test + working_directory: './golang-app-A' + commands: + - go test -v + matrix: + image: + - golang:1.11 + - golang:1.12 + - golang:1.13 + environment: + - [CGO_ENABLED=1] + - [CGO_ENABLED=0] +{% endraw %} +{% endhighlight %} + +Here we want run unit tests with 3 different versions of GO and also try with CGO enabled or not. Instead of manually writing 6 parallel steps in your pipeline with all possible combinations, we can simply use the `matrix` syntax to create the following parallel steps: + +* Go 1.11 with CGO enabled +* Go 1.11 with CGO disabled +* Go 1.12 with CGO enabled +* Go 1.12 with CGO disabled +* Go 1.13 with CGO enabled +* Go 1.13 with CGO disabled + +The resulting Codefresh YAML is much more compact. Notice that because the `environment` property in Codefresh is already an array on its own, when we use it with the `matrix` syntax we need to enclose its value with `[]` (array of arrays). + +You can add more dimensions to a matrix build (and not just two as shown in the example). Here is another example with 3 dimensions: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + MyUnitTests: + stage: test + matrix: + image: + - 'maven:3.5.2-jdk-8-alpine' + - 'maven:3.6.2-jdk-11-slim' + - 'maven:3-jdk-8' + commands: + - ["mvn --version", "mvn -Dmaven.repo.local=/codefresh/volume/m2_repository test"] + - ["mvn --version", "mvn -Dmaven.test.skip -Dmaven.repo.local=/codefresh/volume/m2_repository package"] + environment: + - [MAVEN_OPTS=-Xms1024m] + - [MAVEN_OPTS=-Xms512m] +{% endraw %} +{% endhighlight %} + +This pipeline creates 3 x 2 x 2 = 12 parallel steps with all the possible combinations of: + +* Maven version +* Running or disabling tests +* Using 1GB or 512MBs of memory. + +Remember that all parallel steps run within the same pipeline executor so make sure that you have enough resources as the number +of matrix variations can quickly grow if you add too many dimensions. + +Notice that, as with the `scale` syntax, the defined values/properties are merged between parent step (`MyUnitTests` in the example above) and children steps. 
For example, if you set an environment variable on the parent and also on child matrix steps, the result will be a merged environment where all values are available.
+
+## Parallel pipeline execution
+> If you use parallel execution mode for pipelines, you _cannot_ use _implicit parallel steps_.
+
+To activate advanced parallel mode for the whole pipeline, you need to declare it explicitly at the root of the `codefresh.yml` file:
+
+```
+version: '1.0'
+mode: parallel
+steps:
+[...]
+```
+
+In full parallel mode, the order of steps inside the `codefresh.yml` **does not** affect the order of execution at all. The Codefresh pipeline engine instead:
+
+1. Evaluates all step-conditions *at the same time*
+2. Executes those that have their requirements met
+3. Starts over with the remaining steps
+4. Stops when there are no more steps to evaluate
+
+This means that in parallel mode the conditions of a step are evaluated **multiple times** as the Codefresh execution engine tries to find which steps it should run next. This implication is very important when you try to understand the order of step execution.
+
+Notice also that in parallel mode, if you don't define any step conditions, Codefresh will try to run **all** steps at once, which is probably not what you want in most cases.
+
+With parallel mode you are expected to define the order of steps in the yaml file, and the Codefresh engine will create a *graph* of execution that satisfies your instructions. This means that writing the `codefresh.yml` file requires more effort on your part, but on the other hand it allows you to define the step order in ways not possible with the sequential mode. You also need to define which steps should depend on the automatic cloning of the pipeline (which is a special step named `main_clone`).
+
+In the next sections we describe how you can define step dependencies in a parallel pipeline.
+
+### Single step dependencies
+
+At the most basic level, you can define that a step *depends on* the execution of another step. This dependency is very flexible, as Codefresh allows you to run a second step once:
+
+1. The first step is finished with success
+1. The first step is finished with failure
+1. The first step completes (regardless of exit status)
+
+The syntax for this is the following post-condition:
+
+{% highlight yaml %}
+second_step:
+  title: Second step
+  when:
+    steps:
+     - name: first_step
+       on:
+       - success
+{% endhighlight %}
+
+If you want to run the second step only if the first one fails, the syntax is:
+
+{% highlight yaml %}
+second_step:
+  title: Second step
+  when:
+    steps:
+     - name: first_step
+       on:
+       - failure
+{% endhighlight %}
+
+Finally, if you don't care about the completion status, the syntax is:
+
+{% highlight yaml %}
+second_step:
+  title: Second step
+  when:
+    steps:
+     - name: first_step
+       on:
+       - finished
+{% endhighlight %}
+
+Notice that `success` is the default behavior, so if you omit the last two lines (i.e., the `on:` part) the second step
+will wait for the first step to run successfully.
+
+>Also notice that the name `main_clone` is reserved for the automatic clone that takes place in the beginning of pipelines that are linked to a git repository. You need to define which steps depend on it (probably the start of your graph) so that `git checkout` happens before the other steps.
+
+As an example, let's assume that you have the following steps in a pipeline:
+
+1. A build step that creates a Docker image
+1. A freestyle step that runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) inside the Docker image
+1. A freestyle step that runs [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) *after* the unit tests, even if they fail
+1. A cleanup step that runs after unit tests if they fail
+
+Here is the full pipeline. Notice the explicit dependency on the `main_clone` step that checks out the code.
+
+`YAML`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+mode: parallel
+steps:
+  MyAppDockerImage:
+    title: Building Docker Image
+    type: build
+    image_name: my-node-js-app
+    working_directory: ./
+    tag: '${{CF_BRANCH_TAG_NORMALIZED}}'
+    dockerfile: Dockerfile
+    when:
+      steps:
+       - name: main_clone
+         on:
+         - success
+  MyUnitTests:
+    title: Running unit tests
+    image: ${{MyAppDockerImage}}
+    fail_fast: false
+    commands:
+      - npm run test
+    when:
+      steps:
+       - name: MyAppDockerImage
+         on:
+         - success
+  MyIntegrationTests:
+    title: Running integration tests
+    image: ${{MyAppDockerImage}}
+    commands:
+      - npm run integration-test
+    when:
+      steps:
+       - name: MyUnitTests
+         on:
+         - finished
+  MyCleanupPhase:
+    title: Cleanup unit test results
+    image: alpine
+    commands:
+      - ./cleanup.sh
+    when:
+      steps:
+       - name: MyUnitTests
+         on:
+         - failure
+{% endraw %}
+{% endhighlight %}
+
+If you run the pipeline you will see that Codefresh automatically understands that `MyIntegrationTests` and `MyCleanupPhase` can run in parallel right after the unit tests finish.
+
+Also notice the `fail_fast: false` line in the unit tests. By default, if *any* step fails in a pipeline the whole pipeline is marked as a failure. With the `fail_fast` directive we can allow the pipeline to continue, so that other steps that depend on the failed step can still run.
+
+
+### Multiple step dependencies
+
+A pipeline step can also depend on multiple other steps.
+
+The syntax is:
+
+{% highlight yaml %}
+third_step:
+  title: Third step
+  when:
+    steps:
+      all:
+       - name: first_step
+         on:
+         - success
+       - name: second_step
+         on:
+         - finished
+{% endhighlight %}
+
+In this case, the third step will run only when BOTH the first and second steps are finished (and the first is actually a success).
+
+*ALL* is the default behavior, so it can be omitted if this is what you need. The example above
+is exactly the same as the one below:
+
+{% highlight yaml %}
+third_step:
+  title: Third step
+  when:
+    steps:
+     - name: first_step
+       on:
+       - success
+     - name: second_step
+       on:
+       - finished
+{% endhighlight %}
+
+Codefresh also allows you to define *ANY* behavior in an explicit manner:
+
+{% highlight yaml %}
+third_step:
+  title: Third step
+  when:
+    steps:
+      any:
+       - name: first_step
+         on:
+         - success
+       - name: second_step
+         on:
+         - finished
+{% endhighlight %}
+
+Here the third step will run when either the first one *OR* the second one has finished.
+
+As an example let's assume this time that we have:
+
+1. A build step that creates a docker image
+1. Unit tests that will run when the docker image is ready
+1. Integration tests that run either after unit tests or if the docker image is ready (contrived example)
+1. 
A cleanup step that runs when both kinds of tests are finished + +Here is the full pipeline + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +mode: parallel +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-node-js-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + MyUnitTests: + title: Running unit tests + image: ${{MyAppDockerImage}} + fail_fast: false + commands: + - npm run test + when: + steps: + - name: MyAppDockerImage + on: + - success + MyIntegrationTests: + title: Running integration tests + image: ${{MyAppDockerImage}} + commands: + - npm run integration-test + when: + steps: + any: + - name: MyUnitTests + on: + - finished + - name: MyAppDockerImage + on: + - success + MyCleanupPhase: + title: Cleanup unit test results + image: alpine + commands: + - ./cleanup.sh + when: + steps: + all: + - name: MyUnitTests + on: + - finished + - name: MyIntegrationTests + on: + - finished +{% endraw %} +{% endhighlight %} + +In this case Codefresh will make sure that cleanup happens only when both unit and integration tests are finished. + + +### Custom step dependencies + +For maximum flexibility you can define a custom condition for a step. + +It is hard to describe all possible cases, because Codefresh supports a [mini DSL]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/#condition-expression-syntax) for conditions. All examples mentioned in conditional execution are still valid in parallel pipelines. + +For example, run this step only if a PR is opened against the production branch: + +{% highlight yaml %} +{% raw %} +my_step: + title: My step + when: + condition: + all: + validateTargetBranch: '"${{CF_PULL_REQUEST_TARGET}}" == "production"' + validatePRAction: '''${{CF_PULL_REQUEST_ACTION}}'' == ''opened''' +{% endraw %} +{% endhighlight %} + +Run this step only for the master branch and when the commit message does not include "skip ci": + +{% highlight yaml %} +{% raw %} +my_step: + title: My step + when: + condition: + all: + noSkipCiInCommitMessage: 'includes(lower("${{CF_COMMIT_MESSAGE}}"), "skip ci") == false' + masterBranch: '"${{CF_BRANCH}}" == "master"' +{% endraw %} +{% endhighlight %} + +You can now add extra conditions regarding the completion state of specific steps. A global object called `steps` contains all steps by name along with a `result` property with the following possible completion states: + +* Success +* Failure +* Skipped (only valid in sequential mode) +* Finished (regardless of status) +* Pending +* Running + +Finished is a shorthand for `success` or `failure` or `skipped`. It is only valid when used in [step dependencies]({{site.baseurl}}/docs/codefresh-yaml/advanced-workflows/#single-step-dependencies), and cannot be used in custom conditions. + +You can mix and match completion states from any other step in your pipeline. Here are some examples: + +{% highlight yaml %} +my_step: + title: My step + when: + condition: + all: + myCondition: steps.MyUnitTests.result == 'failure' || steps.MyIntegrationTests.result == 'failure' +{% endhighlight %} + +{% highlight yaml %} +my_step: + title: My step + when: + condition: + any: + myCondition: steps.MyLoadTesting.result == 'success' + myOtherCondition: steps.MyCleanupStep.result == 'success' +{% endhighlight %} + +You can also use conditions in the success criteria for a parallel step. 
Here is an example + +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- start +- tests +- cleanup +steps: + MyAppDockerImage: + stage: 'start' + title: Building Docker Image + type: build + image_name: my-full-stack-app + working_directory: ./01_sequential/ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + MyTestingPhases: + type: parallel + stage: 'tests' + success_criteria: + condition: + all: + myCondition: ${{steps.my_back_end_tests.result}} === 'success' && ${{steps.my_front_end_tests.result}} === 'success' + steps: + my_back_end_tests: + title: Running Back end tests + image: ${{MyAppDockerImage}} + commands: + - exit 1 + my_front_end_tests: + title: Running Front End tests + image: ${{MyAppDockerImage}} + commands: + - echo "Second" + MyCleanupPhase: + stage: 'cleanup' + title: Cleanup unit test results + image: alpine + commands: + - echo "Finished" +{% endraw %} +{% endhighlight %} + + +## Handling error conditions in a pipeline + +It is important to understand the capabilities offered by Codefresh when it comes to error handling. You have several options in different levels of granularity to select what constitutes a failure and what not. + +By default, *any* failed step in a pipeline will abort the whole pipeline and mark it as failure. + +You can use the directive `fail_fast: false`: +* In a specific step to mark it as ignored if it fails +* At the root level of the pipeline if you want to apply it to all steps + +Therefore, if you want your pipeline to keep running to completion regardless of errors the following syntax is possible: + +``` +version: '1.0' +fail_fast: false +steps: +[...] +``` + +You also have the capability to define special steps that will run when the whole pipeline has a special completion status. Codefresh offers a special object called `workflow` that represents the whole pipeline and allows you to evaluate its status in a step. + +For example, you can have a cleanup step that will run only if the workflow fails (regardless of the actual step that created the error) with the following syntax: + +{% highlight yaml %} +my_cleanup_step: + title: My Pipeline Cleanup + when: + condition: + all: + myCondition: workflow.result == 'failure' +{% endhighlight %} + +As another example we have a special step that will send an email if the pipeline succeeds or if load-tests fail: + +{% highlight yaml %} +my_email_step: + title: My Email step + when: + condition: + any: + myCondition: workflow.result == 'success' + myTestCondition: steps.MyLoadTesting.result == 'failure' +{% endhighlight %} + +Notice that both examples assume that `fail_fast: false` is at the root of the `codefresh.yaml` file. + +The possible values for `workflow.result` are: + +* `running` +* `terminated` +* `failure` +* `pending-approval` +* `success` + + +## Related articles +[Variables in pipelines]({{site.baseurl}}/docs/pipelines/variables/) +[Hooks in pipelines]({{site.baseurl}}/docs/pipelines/hooks/) + + + + + + + + diff --git a/_docs/pipelines/annotations.md b/_docs/pipelines/annotations.md new file mode 100644 index 000000000..036bf41e5 --- /dev/null +++ b/_docs/pipelines/annotations.md @@ -0,0 +1,301 @@ +--- +title: "Annotations in CI pipelines" +description: "Mark your builds and projects with extra annotations" +group: pipelines +toc: true +--- + +Codefresh supports the annotations of several entities with custom annotations. You can use these annotations to store any optional information that you wish to keep associated with each entity. 
Examples would be storing the test coverage for a particular build, or a special settings file for a pipeline. + +Currently Codefresh supports extra annotations for: + +* Projects +* Pipelines +* Builds +* Docker images + +You can view/edit annotations using the [Codefresh CLI](https://codefresh-io.github.io/cli/annotations/) or directly in the Codefresh Web UI. + +>Notice that the syntax shown in this page is deprecated but still supported. For the new syntax +see [hooks]({{site.baseurl}}/docs/pipelines/hooks/). + + +## Adding annotations + +In the most basic scenario you can use the [post operations]({{site.baseurl}}/docs/pipelines/post-step-operations/) of any Codefresh [step]({{site.baseurl}}/docs/pipelines/steps/) to add annotations: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_custom_step: + title: Adding annotations to a project + image: alpine:3.9 + commands: + - echo "Hello" + on_success: + annotations: + set: + - entity_id: annotate-examples + entity_type: project + annotations: + - my_annotation_example1: 10.45 + - my_empty_annotation + - my_string_annotation: Hello World +{% endraw %} +{% endhighlight %} + + +This pipeline adds three annotations to a project called `annotate-examples`. The name of each annotation can only contain letters (upper and lowercase), numbers and the underscore character. The name of each annotation must start with a letter. + + +For the `entity_id` value you can also use an actual ID instead of a name. The `entity_id` and `entity_type` are define which entity will hold the annotations. The possible entity types are: + +* `project` (for a project, even a different one) +* `pipeline` (for a pipeline, even a different one) +* `build` (for a build, even a different one) +* `image` (for a docker image) + +If you don't define them, then by default the current build will be used with these values: +* `entity_id` is `{% raw %}${{CF_BUILD_ID}}{% endraw %}` (i.e. the current build) +* `entity_type` is `build` + +Here is another example where we add annotations to another pipeline as well as another build (instead of the current one) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_custom_step: + title: Adding annotations to multiple entities + image: alpine:3.9 + commands: + - echo "Hello" + on_success: + annotations: + set: + - entity_id: my-project/my-basic-pipeline + entity_type: pipeline + annotations: + - my_annotation_example1: 10.45 + - my_empty_annotation + - my_string_annotation: Hello World + - entity_id: 5ce2a0e869e2ed0a60c1e203 + entity_type: build + annotations: + - my_coverage: 70 + - my_url_example: http://www.example.com +{% endraw %} +{% endhighlight %} + +It is therefore possible to store annotations on any Codefresh entity (and not just the ones that are connected to the build that is adding annotations). + +## Viewing/editing annotations + +You can view the annotations using the Codefresh CLI + +```shell +codefresh get annotation project annotate-examples +``` + +You can also view annotations within the Codefresh UI. 
+ +For build annotations click the *Annotations* on the build details: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png" +alt="Viewing Build annotations" +caption="Viewing Build annotations" +max-width="80%" +%} + +For pipeline annotations click the *Annotations* button in the pipeline list view: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png" +alt="Viewing Pipeline annotations" +caption="Viewing Pipeline annotations" +max-width="80%" +%} + +For project annotations click the *Annotations* button in the project list view: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png" +alt="Viewing project annotations" +caption="Viewing project annotations" +max-width="80%" +%} + +In all cases you will see a dialog with all existing annotations. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png" +alt="Editing annotations" +caption="Editing annotations" +max-width="50%" +%} + +You can add additional annotations manually by clicking the *Add annotation* button and entering: + +* The name of the annotation +* The type of the annotation (text, number, percentage, link, boolean) +* The desired value + +Click *Save* to apply your changes. + +## Complex annotation values + +Apart from scalar values, you can also store more complex expressions in annotations. You have access to all [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/), text files from the build and even evaluations from the [expression syntax]({{site.baseurl}}/docs/pipelines/condition-expression-syntax/). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'kostis-codefresh/nestjs-example' + revision: '${{CF_REVISION}}' + my_custom_step: + title: Complex annotations + image: alpine:3.9 + commands: + - echo "Hello" + - echo "Sample content" > /tmp/my-file.txt + on_finish: + annotations: + set: + - entity_id: annotate-examples/simple + entity_type: pipeline + annotations: + - qa: pending + - commit_message: ${{CF_COMMIT_MESSAGE}} + - is_main_branch: + evaluate: "'${{CF_BRANCH}}' == 'main'" + - my_json_file: "file:/tmp/my-file.txt" + - my_docker_file: "file:Dockerfile" +{% endraw %} +{% endhighlight %} + +>Notice that this pipeline is using dynamic git repository variables, so it must be linked to a least one [git trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) in order to work. + +The last two annotations add the text of a file as a value. You can define an absolute or relative path. No processing is done on the file before being stored. If a file is not found, the annotation will still be added verbatim. +We suggest you only store small text files in this manner as annotations values. 
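+
+If you want to double-check the values stored by the example above, you can query them with the CLI, following the same pattern shown earlier (the entity id `annotate-examples/simple` is the pipeline used in the example):
+
+```shell
+codefresh get annotation pipeline annotate-examples/simple
+```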
+ +## Removing annotations + +You can also remove annotations by mentioning their name: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_custom_step: + title: Adding annotations to a pipeline + image: alpine:3.9 + commands: + - echo "Hello" + on_success: + annotations: + set: + - entity_id: my-project/my-basic-pipeline + entity_type: pipeline + annotations: + - my_annotation_example1: 10.45 + - my_empty_annotation + - my_string_annotation: Hello World + - my_second_annotation: This one will stay + my_unit_tests: + title: Removing annotations + image: alpine:3.9 + commands: + - echo "Tests failed" + - exit 1 + on_fail: + annotations: + unset: + - entity_id: my-project/my-basic-pipeline + entity_type: pipeline + annotations: + - my_annotation_example1 + - my_empty_annotation + - my_string_annotation +{% endraw %} +{% endhighlight %} + +You can also use both `unset` and `set` block in a single `annotations` block. And of course, you can remove annotations from multiple entities. + +The `unset` annotation can be used with all post-step operations (`on_success`, `on_fail`, `on_finish`). + + +## Adding annotations to the current build/image + +As a convenience feature: + +1. If your pipeline has a build step +1. If you want to add annotations to the present build or image + +you can also define annotations in the root level of the build step and not mention the entity id and type. Annotations will then be added in the present build. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'kostis-codefresh/nestjs-example' + revision: 'master' + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-app-image + working_directory: ./ + tag: 'sample' + dockerfile: Dockerfile + annotations: + set: + - annotations: + - my_number_annotation: 9999 + - my_empty_annotation + - my_docker_file: "file:Dockerfile" + - my_text_annotation: simple_text +{% endraw %} +{% endhighlight %} + +After running this pipeline at least once, you can retrieve the annotations from any previous build by using the respective id: + +```shell +codefresh get annotation build 5ce26f5ff2ed0edd561fa2fc +``` + +You can also define `entity_type` as `image` and don't enter any `entity_id`. In this case the image created from the build step will be annotated. + + +Note that this syntax is optional. You can still define annotations for a build/image or any other entity using the post operations of any step by mentioning explicitly the target id and type. + +## Related articles +[Image annotations]({{site.baseurl}}/docs/docker-registries/metadata-annotations/) +[Post-step operations]({{site.baseurl}}/docs/pipelines/post-step-operations/) +[Creating CI pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Hooks in CI pipelines]({{site.baseurl}}/docs/pipelines/hooks/) diff --git a/_docs/pipelines/condition-expression-syntax.md b/_docs/pipelines/condition-expression-syntax.md new file mode 100644 index 000000000..322549f3c --- /dev/null +++ b/_docs/pipelines/condition-expression-syntax.md @@ -0,0 +1,107 @@ +--- +title: "Condition Expression Syntax" +description: "Condition expressions can be included in each step in your codefresh.yml, and must be satisfied for the step to execute." 
+group: pipelines +redirect_from: + - /docs/condition-expression-syntax/ + - /docs/codefresh-yaml/expression-condition-syntax/ +toc: true +--- +Each step in `codefresh.yml` file can contain conditions expressions that must be satisfied for the step to execute. + +This is a small example of where a condition expression can be used: + `YAML` +{% highlight yaml %} +step-name: + description: Step description + image: image/id + commands: + - bash-command1 + - bash-command2 + when: + condition: + all: + executeForMasterBranch: "{% raw %}'${{CF_BRANCH}}{% endraw %}' == 'master'" +{% endhighlight %} + +A condition expression is a basic expression that is evaluated to true/false (to decide whether to execute or not to execute), and can have the following syntax: + +### Types + +{: .table .table-bordered .table-hover} +| Type | True/False Examples | True/False | +| ------- | ----------------------------------------- | --------------| +| String | True: "hello"
            False: "" | {::nomarkdown}
            • String with content = true
            • Empty string = false
            String comparison is lexicographic.{:/} | +| Number | True: 5
            True: 3.4
            True: 1.79E+308 | {::nomarkdown}
            • Any number other than 0 = true.
            • 0 = false
            {:/} | +| Boolean | True: true
            False: false | {::nomarkdown}
            • True = true
            • False = false
            {:/} | +| Null | False: null | Always false | + +### Variables + +You can use the User Provided variables as explained in [Variables]({{site.baseurl}}/docs/pipelines/variables/), including the [variables +exposed by each individual pipeline step]({{site.baseurl}}/docs/pipelines/variables/#step-variables). + +### Unary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ---------- | --------------------- | +| `-` | Negation of numbers | +| `!` | Logical NOT | + +### Binary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| --------------------------- | ----------- | +| Add, String Concatenation | `+` | +| Subtract | `-` | +| Multiply | `*` | +| Divide | `/` | +| Modulus | `%` | +| Logical AND | `&&` | +| Logical OR | `||` | + +### Comparisons + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ----------- | ---------------------- | +| `==` | Equal to | +| `!=` | Not equal to | +| `>` | Greater than | +| `>=` | Greater than or equal | +| `<` | Less than | +| `<=` | Less than or equal | + +### Functions + +{: .table .table-bordered .table-hover} +| Function Name | Parameters | Return value | Example | +| ------------- | ------------------ | -------------- | ----------------------- | +| String | 0: number or string | String of input value. | `String(40) == '40'` | +| Number | 0: number or string | Number of input value. | `Number('50') == 50`
            `Number('hello')` is invalid | +| Boolean | 0: number or string | Boolean of input value. | `Boolean('123') == true`
            `Boolean('') == false`
            `Boolean(583) == true`
            `Boolean(0) == false` | +| round | 0: number | Rounded number. | `round(1.3) == 1`
            `round(1.95) == 2` | +| floor | 0: number | Number rounded to floor. | `floor(1.3) == 1`
            `floor(1.95) == 1` | +| upper | 0: string | String in upper case. | `upper('hello') == 'HELLO'` | +| lower | 0: string | String in lower case. | `lower('BYE BYE') == 'bye bye'` | +| trim | 0: string | Trimmed string. | `trim(" abc ") == "abc"` | +| trimLeft | 0: string | Left-trimmed string. | `trimLeft(" abc ") == "abc "`| +| trimRight | 0: string | Right-trimmed string. | `trimRight(" abc ") == " abc"` | +| replace | 0: string - main string
            1: string - substring to find
            2: string - substring to replace | Replace all instances of the sub-string (1) in the main string (0) with the sub-string (2). | `replace('hello there', 'e', 'a') == 'hallo thara'`| +| substring | 0: string - main string
            1: string - index to start
            2: string - index to end | Returns a sub-string of a string. | `substring("hello world", 6, 11) == "world"` | +| length | string | Length of a string. | `length("gump") == 4` | +| includes | 0: string - main string
            1: string - string to search for | Whether a search string is located within the main string. | `includes("codefresh", "odef") == true` | +| indexOf | 0: string - main string
            1: string - string to search for | Index of a search string if it is found inside the main string | `indexOf("codefresh", "odef") == 1` | +| match | 0: string - main string
            1: string - regular expression string, [JS style](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) (Note: in JS strings, the backslash `\` is an escape character so in order to use a literal backslash, you need to escape it. For example: `"^\\d+$"` instead of `"^\d+$"`)
            2: boolean - ignore case | Search for a regular expression inside a string, ignoring or not ignoring case | `match("hello there you", "..ll.", false) == true`
            `match("hello there you", "..LL.", false) == false`
            `match("hello there you", "hell$", true) == false`
            `match("hello there you", "^hell", true) == true`
            `match("hello there you", "bye", false) == false` | +| Variable | string | Search for the value of a variable | `Variable('some-clone')` | +| Member | 0: string - variable name
            1: string - member name | Search for the value of a variable member | `Member('some-clone', 'working-directory')` | + +## What to read next + +* [Conditional Execution of Steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) +* [Condition Expression Syntax]({{site.baseurl}}/docs/codefresh-yaml/condition-expression-syntax/) +* [Working Directories]({{site.baseurl}}/docs/codefresh-yaml/working-directories/) +* [Annotations]({{site.baseurl}}/docs/codefresh-yaml/annotations/) +* [Pipeline/Step hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/) diff --git a/_docs/pipelines/conditional-execution-of-steps.md b/_docs/pipelines/conditional-execution-of-steps.md new file mode 100644 index 000000000..ef0928317 --- /dev/null +++ b/_docs/pipelines/conditional-execution-of-steps.md @@ -0,0 +1,248 @@ +--- +title: "Conditional execution of steps" +description: "Skip specific pipeline steps according to one or more conditions" +group: pipelines +sub_group: steps +redirect_from: + - /docs/conditional-execution-of-steps/ +toc: true +--- +For each step in a `codefresh.yml` file, you can define a set of conditions which need to be satisfied in order to execute the step. (An introduction to the `codefresh.yml` file can be found [here]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/).) + +There are currently two main methods to define conditions: +* Branch conditions +* Expression conditions + +## Branch Conditions + +Usually, you'll want to define a branch condition, be it of the type ```ignore``` for blacklisting a set of branches or of the type ```only``` for allowlisting a set of branches. Each branch specification can either be an exact branch name, e.g. ```master```, or a regular expression, e.g. ```/hotfix$/```. Case insensitive regexps (```/^FB-/i```) are also supported. + +Here are some examples: + +Only execute for the ```master``` branch: + + `only-master-branch.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + branch: + only: + - master +{% endhighlight %} + +Only execute for branches whose name begins with ```FB-``` prefix (feature branches): + + `only-feature-branches.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + branch: + only: + - /^FB-.*/i +{% endhighlight %} + +Ignore the develop branch and master branch: + + `ignore-master-and-develop-branch.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + branch: + ignore: + - master + - develop +{% endhighlight %} + + +>We use [JavaScript regular expressions](https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions) for the syntax in branch conditions. + + +## Condition expressions + +Alternatively, you can use more advanced condition expressions. + +This follows the standard [condition expression syntax](#condition-expression-syntax). In this case, you can choose to execute if ```all``` expression conditions evaluate to ```true```, or to execute if ```any``` expression conditions evaluate to ```true```. + +> Note: Use "" around variables with text to avoid errors in processing the conditions. Example: "${{CF_COMMIT_MESSAGE}}" + +Here are some examples. 
Execute if the string ```[skip ci]``` is not part of the main repository commit message AND if the branch is ```master``` + + `all-conditions.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + condition: + all: + noSkipCiInCommitMessage: 'includes(lower({% raw %}"${{CF_COMMIT_MESSAGE}}"{% endraw %}), "skip ci") == false' + masterBranch: '{% raw %}"${{CF_BRANCH}}{% endraw %}" == "master"' +{% endhighlight %} + +Execute if the string ```[skip ci]``` is not part of the main repository commit message, OR if the branch is not a feature branch (i.e. name starts with FB-) + + `any-condition.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + condition: + any: + noSkipCiInCommitMessage: 'includes(lower({% raw %}"${{CF_COMMIT_MESSAGE}}"{% endraw %}), "skip ci") == false' + notFeatureBranch: 'match({% raw %}"${{CF_BRANCH}}"{% endraw %}, "^FB-", true) == false' +{% endhighlight %} + +Each step in `codefresh.yml` file can contain conditions expressions that must be satisfied for the step to execute. + +This is a small example of where a condition expression can be used: + `YAML` +{% highlight yaml %} +step-name: + description: Step description + image: image/id + commands: + - bash-command1 + - bash-command2 + when: + condition: + all: + executeForMasterBranch: "{% raw %}'${{CF_BRANCH}}{% endraw %}' == 'master'" +{% endhighlight %} + +### Condition expression syntax +A condition expression is a basic expression that is evaluated to true/false (to decide whether to execute or not to execute), and can have the following syntax: + +#### Types + +{: .table .table-bordered .table-hover} +| Type | True/False Examples | True/False | +| ------- | ----------------------------------------- | --------------| +| String | True: "hello"
            False: "" | {::nomarkdown}
            • String with content = true
            • Empty string = false
            String comparison is lexicographic.{:/} | +| Number | True: 5
            True: 3.4
            True: 1.79E+308 | {::nomarkdown}
            • Any number other than 0 = true.
            • 0 = false
            {:/} | +| Boolean | True: true
            False: false | {::nomarkdown}
            • True = true
            • False = false
            {:/} | +| Null | False: null | Always false | + +#### Variables + +You can use the User Provided variables as explained in [Variables]({{site.baseurl}}/docs/pipelines/variables/), including the [variables +exposed by each individual pipeline step]({{site.baseurl}}/docs/pipelines/variables/#step-variables). + +#### Unary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ---------- | --------------------- | +| `-` | Negation of numbers | +| `!` | Logical NOT | + +#### Binary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| --------------------------- | ----------- | +| Add, String Concatenation | `+` | +| Subtract | `-` | +| Multiply | `*` | +| Divide | `/` | +| Modulus | `%` | +| Logical AND | `&&` | +| Logical OR | `||` | + +#### Comparisons + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ----------- | ---------------------- | +| `==` | Equal to | +| `!=` | Not equal to | +| `>` | Greater than | +| `>=` | Greater than or equal | +| `<` | Less than | +| `<=` | Less than or equal | + +#### Functions + +{: .table .table-bordered .table-hover} +| Function Name | Parameters | Return value | Example | +| ------------- | ------------------ | -------------- | ----------------------- | +| String | 0: number or string | String of input value. | `String(40) == '40'` | +| Number | 0: number or string | Number of input value. | `Number('50') == 50`
            `Number('hello')` is invalid | +| Boolean | 0: number or string | Boolean of input value. | `Boolean('123') == true`
            `Boolean('') == false`
            `Boolean(583) == true`
            `Boolean(0) == false` | +| round | 0: number | Rounded number. | `round(1.3) == 1`
            `round(1.95) == 2` | +| floor | 0: number | Number rounded to floor. | `floor(1.3) == 1`
            `floor(1.95) == 1` | +| upper | 0: string | String in upper case. | `upper('hello') == 'HELLO'` | +| lower | 0: string | String in lower case. | `lower('BYE BYE') == 'bye bye'` | +| trim | 0: string | Trimmed string. | `trim(" abc ") == "abc"` | +| trimLeft | 0: string | Left-trimmed string. | `trimLeft(" abc ") == "abc "`| +| trimRight | 0: string | Right-trimmed string. | `trimRight(" abc ") == " abc"` | +| replace | 0: string - main string
            1: string - substring to find
            2: string - substring to replace | Replace all instances of the sub-string (1) in the main string (0) with the sub-string (2). | `replace('hello there', 'e', 'a') == 'hallo thara'`| +| substring | 0: string - main string
            1: string - index to start
            2: string - index to end | Returns a sub-string of a string. | `substring("hello world", 6, 11) == "world"` | +| length | string | Length of a string. | `length("gump") == 4` | +| includes | 0: string - main string
            1: string - string to search for | Whether a search string is located within the main string. | `includes("codefresh", "odef") == true` | +| indexOf | 0: string - main string
            1: string - string to search for | Index of a search string if it is found inside the main string | `indexOf("codefresh", "odef") == 1` | +| match | 0: string - main string
            1: string - regular expression string, [JS style](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) (Note: in JS strings, the backslash `\` is an escape character so in order to use a literal backslash, you need to escape it. For example: `"^\\d+$"` instead of `"^\d+$"`)
            2: boolean - ignore case | Search for a regular expression inside a string, ignoring or not ignoring case | `match("hello there you", "..ll.", false) == true`
            `match("hello there you", "..LL.", false) == false`
            `match("hello there you", "hell$", true) == false`
            `match("hello there you", "^hell", true) == true`
            `match("hello there you", "bye", false) == false` | +| Variable | string | Search for the value of a variable | `Variable('some-clone')` | +| Member | 0: string - variable name
            1: string - member name | Search for the value of a variable member | `Member('some-clone', 'working-directory')` | + +## Execute steps according to the presence of a variable + +If a variable does not exist in a Codefresh pipeline, then it will simply stay as a string inside the definition. When the `{% raw %}${{MY_VAR}}{% endraw %}` variable is not available, the engine will literally print `{% raw %}${{MY_VAR}}{% endraw %}`, because that variable doesn't exist. + +You can use this mechanism to decide which steps will be executed if a [variable]({{site.baseurl}}/docs/pipelines/variables/) exists or not. + + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Running if variable exists" + type: "freestyle" + image: "alpine:3.9" + commands: + - echo "Step 1 is running" + when: + condition: + all: + whenVarExists: 'includes("${{MY_VAR}}", "{{MY_VAR}}") == false' + step2: + title: "Running if variable does not exist" + type: "freestyle" + image: "alpine:3.9" + commands: + - echo "Step 2 is running" + when: + condition: + all: + whenVarIsMissing: 'includes("${{MY_VAR}}", "{{MY_VAR}}") == true' +{% endraw %} +{% endhighlight %} + +Try running the pipeline above and see how it behaves when a variable called `MY_VAR` exists (or doesn't exist). + +>Notice that if you use this pattern a lot it means that you are trying to create a complex pipeline that is very smart. We suggest you create instead multiple [simple pipelines for the same project]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#trunk-based-development). + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Variables]({{site.baseurl}}/docs/pipelines/variables/) +[Pull Requests and Branches]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/) +[Pipeline/Step hooks]({{site.baseurl}}/docs/pipelines/hooks/) diff --git a/_docs/pipelines/configuration/build-status.md b/_docs/pipelines/configuration/build-status.md new file mode 100644 index 000000000..f30776a68 --- /dev/null +++ b/_docs/pipelines/configuration/build-status.md @@ -0,0 +1,151 @@ +--- +title: "Public logs and status badges" +description: "Embedding Status Images and viewing public logs" +group: pipelines +sub_group: configuration +toc: true +redirect_from: + - /docs/build-status + - /docs/build-status/ + - /docs/build-badges-1 + - /docs/build-badges-1/ +--- + + +Badges are simple images that show you the last build status. They support both the pipeline and branch service status. +The badges can be embedded into your repository’s `readme.md` file or any other website. + +Here is an example: + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/badge.png" +url="/images/pipeline/badges/badge.png" +alt="Build badge example" +caption="Build badge example" +max-width="80%" +%} + +Clicking the badge takes you into the build view of the pipeline. + +## Finding the build badge of your project + +In the pipeline view of a project, select the *Settings* tab and then click *General*. Next to the *badges* section you will find a link to the build badge. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/get-build-badge.png" +url="/images/pipeline/badges/get-build-badge.png" +alt="Build badge setup" +caption="Build badge setup" +max-width="80%" +%} + +Click on it and you will get a new dialog where you can select + + * The graphical style of the badge (two styles are offered) + * The syntax for the badge + +{% include + image.html + lightbox="true" + file="/images/a0c4aed-codefresh_badges_2.png" + url="/images/a0c4aed-codefresh_badges_2.png" + alt="Codefresh badges syntax" + caption="Codefresh badges syntax" + max-width="70%" + %} + + The following embedding options are available: + + * Markdown for usage in text files (e.g. `README.MD`) + * Plain HTML for normal websites + * AsciiDoc for documentation pages + * Image for any other document type + + +Copy the snippet in your clipboard. + +## Using the build badge + +Paste the snippet in the file/document where you want the badge to be visible (e.g. in a Readme file in GitHub). + +For example, the markdown syntax is + +``` +[![Codefresh build status]( BADGE_LINK )]( URL_TO_PIPELINE ) +``` + +You can also manually change the parameters of the link by using +`https://g.codefresh.io/api/badges/build?*param1*=xxx&*param2*=yyy`\\ +when *param1*, *param2*, etc... are the parameters from the table below. + +{: .table .table-bordered .table-hover} +| Query parameter | Description | +| -----------------------|--------------------------------------------------------- | +| **branch** - optional | Name of the branch
            If not supplied, default is master | +| **repoName** | Name of the repository | +| **pipelineName** | Name of the pipeline | +| **accountName** | Name of the account | +| **repoOwner** | The name of the repository owner | +| **key** - optional | Token related to the account | +| **type** - optional | Badge types
            cf-1: ![Codefresh build status]( http://g.codefresh.io/api/badges/build/template/urls/cf-1) - also the default badge.
            cf-2: ![Codefresh build status]( http://g.codefresh.io/api/badges/build/template/urls/cf-2) | + +Everybody who looks at your readme file will also see the current build status of the associated Codefresh pipeline. + +## Public build logs + +By default, even though the badge shows the build status for everybody, clicking the badge allows only Codefresh registered users that also have access to the pipeline to view the actual builds. + +If you are working on an open-source project and wish for greater visibility, you can enable public logs (and associated badge) for your project so that any user can see the pipeline results (even if they are not logged into Codefresh). + +Public logs are disabled by default and you need to explicitly enable them. + +>This happens for security reasons. Make sure that the logs you are exposing to the Internet do not have any sensitive information. If you are unsure, you can still use the private badge that shows project status only as explained in the previous section. + +To enable the public logs, toggle the respective switch in the pipeline settings: + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/toggle-public-logs.png" +url="/images/pipeline/badges/toggle-public-logs.png" +alt="Enabling public logs" +caption="Enabling public logs" +max-width="80%" +%} + +Then click the *Save* button to apply changes for your pipeline. Once that is done you will also get a second badge (public) as well as the public URL to your project. + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/get-public-url.png" +url="/images/pipeline/badges/get-public-url.png" +alt="Getting the public URL log view" +caption="Getting the public URL log view" +max-width="70%" +%} + +Now you can use this badge and/or public URL anywhere and all users can view your logs without being logged into Codefresh at all (or having access to your pipeline). + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/view-public-logs.png" +url="/images/pipeline/badges/view-public-logs.png" +alt="Public logs" +caption="Public logs" +max-width="90%" +%} + +Your visitors can also click on each individual pipeline step and see the logs for that step only. + +If you are using Codefresh to manage a public project, you should also use the capability to [trigger builds from external forks]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#support-for-building-pull-requests-from-forks). + +## Related articles +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Monitoring pipelines]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/) diff --git a/_docs/pipelines/configuration/pipeline-settings.md b/_docs/pipelines/configuration/pipeline-settings.md new file mode 100644 index 000000000..b9c78e298 --- /dev/null +++ b/_docs/pipelines/configuration/pipeline-settings.md @@ -0,0 +1,87 @@ +--- +title: "Global settings for pipelines" +description: "Define global options for pipeline templates, yaml sources and approval behavior" +group: pipelines +sub_group: configuration +toc: true +--- + +To access your global pipeline settings navigate to [https://g.codefresh.io/account-admin/account-conf/pipeline-settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings) or click on *Account settings* on the left sidebar and then choose *Pipeline settings* item on the next screen. 
+ +On this page, you can define global parameters for the whole Codefresh account regarding pipeline options. Users can still override some of these options for individual pipelines. + +{% include image.html +lightbox="true" +file="/images/pipeline/pipeline-settings/pipeline-settings-ui.png" +url="/images/pipeline/pipeline-settings/pipeline-settings-ui.png" +alt="Pipeline settings" +caption="Pipeline settings" +max-width="80%" +%} + + +## Pause pipeline executions + +Pause builds for pipelines at the account level, for example, during maintenance. + +* **Pause build execution** is disabled by default. +* When enabled: + * New pipelines in the account are paused immediately. + * Existing pipelines with running builds are paused only after the builds have completed execution. +* Paused pipelines are set to status Pending, and remain in this status until **Pause build execution** is manually disabled for the account. + +{% include image.html +lightbox="true" +file="/images/pipeline/pipeline-settings/pause-pipeline-enabled.png" +url="/images/pipeline/pipeline-settings/pause-pipeline-enabled.png" +alt="Pause Build Execution pipeline setting enabled" +caption="Pause Build Execution pipeline setting enabled" +max-width="80%" +%} + +## Template section + +Here you can define global template behavior. The options are: + +* Enable [pipeline templates]({{site.baseurl}}/docs/docs/pipelines/pipelines/#using-pipeline-templates) for users. If this is enabled some pipelines can be marked as templates and users can still select them when creating a new pipeline. +* Decide if users can clone an existing pipeline (along with its triggers and associated parameters) when [creating a new pipeline]({{site.baseurl}}/docs/docs/pipelines/pipelines/#creating-new-pipelines). + +Note that templates are simply normal pipelines “marked” as a template. There is no technical difference between templates and actual pipelines. + +## Pipeline YAML section + +Here you can restrict the sources of pipeline YAML that users can select. The options are: + +* Enable/Disable the [inline editor]({{site.baseurl}}/docs/docs/pipelines/pipelines/#using-the-inline-pipeline-editor) where YAML is stored in Codefresh SaaS +* Enable/disable pipeline YAML from connected Git repositories +* Enable/disable pipeline YAML from [external URLs]({{site.baseurl}}/docs/docs/pipelines/pipelines/#loading-codefreshyml-from-version-control) + +You need to allow at least one of these options so that users can create new pipelines. We suggest leaving the first option enabled when users are still learning about Codefresh and want to experiment. + +## Advanced pipeline options + +Here you can set the defaults for advanced pipeline behavior. The options are: + +* [Keep or discard]({{site.baseurl}}/docs/pipelines/steps/approval/#keeping-the-shared-volume-after-an-approval) the volume when a pipeline is entering approval state +* Whether pipelines in approval state [count or not against concurrency]({{site.baseurl}}/docs/pipelines/steps/approval/#define-concurrency-limits) +* Define the [Service Account]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/#setting-up-ecr-integration---service-account) for Amazon ECR integration. +* Set the default registry where all Public Marketplace Step images are pulled from. Registries listed are from the [Docker Registry]({{site.baseurl}}/docs/integrations/docker-registries/) integration page. + * Example: Public Marketplace Step image is defined to use Docker Hub. 
If you select a quay.io integration, all Public Marketplace Step images will be pulled from quay.io instead of Docker Hub. + * Note: This does not affect Freestyle Steps. + +Note that the first option affects pipeline resources and/or billing in the case of SaaS pricing. It will also affect users of existing pipelines that depend on this behavior. It is best to enable/disable this option only once at the beginning. + +## Default Behavior for Build Step + +Here you can decide if the build step will push images or not according to your organization’s needs. The options are: + +1. Users need to decide if an image will be pushed or not after it is built +2. All built images are automatically pushed to the default registry +3. All built images are NOT pushed anywhere by default + +Note that this behavior is simply a convenience feature for legacy pipelines. Users can still use a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) in a pipeline and always push an image to a registry regardless of what was chosen in the build step. + +## Related articles +[Creating Pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Git Integration]({{site.baseurl}}/docs/integrations/git-providers/) diff --git a/_docs/pipelines/configuration/secrets-store.md b/_docs/pipelines/configuration/secrets-store.md new file mode 100644 index 000000000..cccd4a47b --- /dev/null +++ b/_docs/pipelines/configuration/secrets-store.md @@ -0,0 +1,97 @@ +--- +title: "Secrets in pipelines" +description: "Use Kubernetes secrets in Codefresh" +group: pipelines +sub_group: configuration +toc: true +--- + +Once you have [connected Codefresh to your secrets storage]({{site.baseurl}}/docs/integrations/secret-storage/), you can use them in any pipeline or UI screen. + +> Note: This feature is for Enterprise accounts only. + +## Using secrets in pipelines + +The syntax for using the secret is {% raw %}`${{secrets.NAME_IN_CODEFRESH.KEY}}`{% endraw %}. + +> If you did not include the resource-name as a part of your secret store context creation, the syntax for using your secret differs slightly: + {% raw %}${{secrets.NAME_IN_CODEFRESH.RESOURCE-NAME@KEY}}{% endraw %} + The previous KEY portion is now made of two parts separated using @, where the left side is the name of the resource in the namespace, and the right side the key in that resource. + +To use the secret in your pipeline, you have two options: + +* Define it as a pipeline variable: + +{% include +image.html +lightbox="true" +file="/images/pipeline/secrets/secrets-pipeline-var.png" +url="/images/pipeline/secrets/secrets-pipeline-var.png" +alt="Secrets Pipeline Variable" +caption="Secrets stored in Pipeline Variable" +max-width="80%" +%} + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + step: + type: freestyle + arguments: + image: alpine + commands: + - echo $SECRET +{% endraw %} +{% endhighlight %} + +* Use the secret directly in your YAML + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + step: + type: freestyle + arguments: + image: alpine + environment: + - SECRET=${{secrets.test.key1}} + commands: + - echo $SECRET +{% endraw %} +{% endhighlight %} + + +## Using secrets in the Codefresh UI + +You can also use secrets in the GUI screens that support them. 
Currently you can use secrets in: + +* Values in [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) +* Integration with [cloud storage]({{site.baseurl}}/docs/testing/test-reports/#connecting-your-storage-account) + +Where secret integration is supported, click on the lock icon and enable the toggle button. You will get a list of your connected secrets: + + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +url="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +alt="Using external secrets in shared configuration values" +caption="Using external secrets in shared configuration values" +max-width="50%" +%} + +If you have already specified the resource field during secret definition the just enter on the text field the name of the secret directly, i.e. `my-secret-key`. +If you didn't include a resource name during secret creation then enter the full name in the field like `my-secret-resource@my-secret-key`. + + +## Related articles +[Shared Configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) +[Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[Running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/) +[Debugging Pipelines]({{site.baseurl}}/docs//yaml-examples/examples/trigger-a-k8s-deployment-from-docker-registry/) + diff --git a/_docs/pipelines/configuration/shared-configuration.md b/_docs/pipelines/configuration/shared-configuration.md new file mode 100644 index 000000000..1485e832d --- /dev/null +++ b/_docs/pipelines/configuration/shared-configuration.md @@ -0,0 +1,265 @@ +--- +title: "Shared configuration for pipelines" +description: "How to keep your pipelines DRY" +group: pipelines +sub_group: configuration +toc: true +--- + +After creating several pipelines in Codefresh, you will start to notice several common values between them. Common examples are access tokens, environment URLs, configuration properties etc. + +Codefresh allows you to create those shared values in a central place and then reuse them in your pipelines +avoiding the use of copy-paste. + +You can share: + +* Environment parameters (easy) +* Helm values (easy) +* Any kind of YAML data (advanced) + + +## Creating shared configuration + +From the left sidebar click *Account settings* to enter your global settings. Then choose *Shared Configuration* from the left menu. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-configuration.png" +url="/images/pipeline/shared-configuration/shared-configuration.png" +alt="Creating shared configuration snippets" +caption="Creating shared configuration snippets" +max-width="50%" +%} + +You can create four types of shared configuration: + +* **Shared Configuration**: for environment variables +* **Shared Secret**: for encrypted environment variables of sensitive data (access tokens, etc.) +* **YAML**: for Helm values or any other generic information +* **Secret YAML**: for above, but encrypts the contents + +>RBAC is supported for all types of shared configurations. + +You can create as many shared snippets as you want (with unique names). + +### Using external secrets as values + +Note that the default "shared secrets" and "secret yaml" entities use the built-in secret storage of Codefresh. 
You can also +use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/secret-storage/) (such as Kubernetes secrets), by using the normal entities and then clicking on the lock icon that appears. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +url="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +alt="Using external secrets in shared configuration values" +caption="Using external secrets in shared configuration values" +max-width="50%" +%} + +If you have already specified the resource field during secret definition the just enter on the text field the name of the secret directly, i.e. `my-secret-key`. +If you didn't include a resource name during secret creation then enter the full name in the field like `my-secret-resource@my-secret-key`. + +### Level of access + +For each set of values you can toggle the level of access by [non-admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators). If it is off, users will **not** be able to use the [CLI](https://codefresh-io.github.io/cli/) or [API]({{site.baseurl}}/docs/integrations/codefresh-api/) +to access these [values](https://codefresh-io.github.io/cli/contexts/). If it is on, all users from all your Codefresh teams will be able to access this set of values +with CLI commands or API calls. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-config-access.png" +url="/images/pipeline/shared-configuration/shared-config-access.png" +alt="Allow access to non-admin users" +caption="Allow access to non-admin users" +max-width="60%" +%} + +We recommend that you disable access for all values of type *shared secret* and *secret YAML* unless your organization has different needs. + + +## Using shared environment variables + +Each pipeline has a set of environment variables that can be defined in the *Workflow* screen. +To import a shared configuration open the pipeline editor, and from the tabs on the right side select *VARIABLES*. Then click the gear icon to *Open Advanced Options*: + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/environment-variables.png" +url="/images/pipeline/shared-configuration/environment-variables.png" +alt="Pipeline environment variables" +caption="Pipeline environment variables" +max-width="50%" +%} + +To use your shared configuration, click the *Import from shared configuration* button and select the snippet from the list: + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/import-variables.png" +url="/images/pipeline/shared-configuration/import-variables.png" +alt="Importing shared configuration" +caption="Importing shared configuration" +max-width="50%" +%} + +Once you click *Add* the values from the shared configuration will be appended to the ones +you have in your pipelines. In case of similar values the shared configuration will follow the [precedence rules]({{site.baseurl}}/docs/pipelines/variables/#user-provided-variables). + + +## Using shared Helm values + +To use a shared YAML snippet for Helm values you can install a new Helm chart either from: + +* The [Helm chart list]({{site.baseurl}}/docs/new-helm/add-helm-repository/#install-chart-from-your-helm-repository) +* The [Helm environment board]({{site.baseurl}}/docs/new-helm/helm-environment-promotion/#moving-releases-between-environments). 
+ +In both cases, when you see the Helm installation dialog you can import any of your YAML snippets +to override the default chart values. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/helm-import.png" +url="/images/pipeline/shared-configuration/helm-import.png" +alt="Importing Helm values" +caption="Importing Helm values" +max-width="50%" +%} + +From the same dialog you can also create a brand-new shared configuration snippet of type YAML. +Not only it will be used for this Helm chart, but it will be added in your global shared configuration as well. + +## Using values from the Shared Configuration in your Helm step + +Additionally, you can define shared variables in your account settings and reuse those across your Helm steps, and specifically, in your [custom Helm values]({{site.baseurl}}/docs/docs/new-helm/using-helm-in-codefresh-pipeline/#helm-values). + +Under *Account Setting* > *Shared Configuration*, add the variable to your shared configuration. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/helm-shared-variables.png" +url="/images/pipeline/shared-configuration/helm-version-shared.png" +alt="Adding shared configuration variables" +caption="Adding shared configuration variables" +max-width="50%" +%} + +Go to the workflow of the Codefresh pipeline to which you want to add the variable. Then select *variables* from the right sidebar. *Open advanced configuration* and select *Import from shared configuration*. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/environment-variables.png" +url="/images/pipeline/shared-configuration/environment-variables.png" +alt="Pipeline environment variables" +caption="Pipeline environment variables" +max-width="50%" +%} + +This will allow you to add shared variables. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-helm-variables.png" +url="/images/pipeline/shared-configuration/shared-helm-variables.png" +alt="Shared helm variable" +caption="Shared helm variable" +max-width="50%" +%} + +Add the shared variables to your Helm step: + +{% highlight shell %} +{% raw %} +deploy: + type: "helm" + working_directory: "./react-article-display" + stage: "deploy" + arguments: + action: "install" + chart_name: "charts/example-chart" + release_name: "test-chart" + helm_version: "${{HELM_VERSION}}" + kube_context: "anais-cluster@codefresh-sa" + custom_values: + - 'pullPolicy=${{PULL_POLICY}}' +{% endraw %} +{% endhighlight %} + +The shared variables can now be used across your pipelines. + +## Sharing any kind of YAML data in pipelines + +All the snippets from shared configuration are also available as context in the [Codefresh CLI](https://codefresh-io.github.io/cli/contexts/) + +This means that you can manipulate them programmatically and read their values in the pipeline in any way you see fit. 
+ +If for example you have a shared configuration named `my-global-config` you can easily read its contents programmatically using the CLI: + +{% highlight shell %} +$codefresh get context my-global-config --output=yaml + +apiVersion: v1 +kind: context +metadata: + default: false + system: false + name: my-global-config +type: config +spec: + type: config + data: + foo: bar +{% endhighlight %} + +### Example - custom value manipulation + +Let's say that you have a YAML segment with the following contents: + +{% highlight yaml %} +favorite: + drink: coffee + food: pizza +{% endhighlight %} + +Here is a pipeline step that is reading the yaml snippet and extracts a value + + `YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyFavoriteFoodStep: + title: Favorite food + image: codefresh/cli + commands: + - echo I love eating $(codefresh get context my-food-values --output=json | jq -r '.spec.data.favorite.food') +{% endraw %} +{% endhighlight %} + +Once the pipeline runs, you will see in the logs: + +``` +I love eating pizza +``` + +## Manipulating shared configuration programmatically + +You can also create/update/delete shared configuration via the [Codefresh CLI](https://codefresh-io.github.io/cli/) or [API]({{site.baseurl}}/docs/integrations/codefresh-api/). + +See the [context section](https://codefresh-io.github.io/cli/contexts/create-context/) in the CLI documentation. + + + +## Related articles +[Variables]({{site.baseurl}}/docs/pipelines/variables/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) + diff --git a/_docs/pipelines/debugging-pipelines.md b/_docs/pipelines/debugging-pipelines.md new file mode 100644 index 000000000..7e88d0e36 --- /dev/null +++ b/_docs/pipelines/debugging-pipelines.md @@ -0,0 +1,250 @@ +--- +title: "Debugging pipelines" +description: "Pause and inspect pipelines" +group: pipelines +toc: true +--- + +In addition to [running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/), Codefresh also allows you to debug pipelines by stopping their execution and inspecting manually their state (files, environment variables, tools etc.) + + +The Codefresh pipeline debugger works similar to your IDE debugger. You can place breakpoints on one or more pipeline steps and once the pipeline hits one of them, it will stop. You will then get a terminal like interface inside your pipeline step where you can run any commands that you wish in order to understand the state of the container. + + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/debug-session.png" + url="/images/pipeline/debug/debug-session.png" + alt="A debugging session" + caption="A debugging session" + max-width="70%" +%} + +There are several options for defining exactly when a step will stop. + +## Entering the debugger mode + +There are threes ways to enter the debugging mode in a pipeline. You can activate the debugging button when your run the pipeline: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/run-pipeline-debug.png" + url="/images/pipeline/debug/run-pipeline-debug.png" + alt="Running a pipeline in debug mode" + caption="Running a pipeline in debug mode" + max-width="30%" +%} + +Alternatively if a pipeline is already running normally, you can enter debugging mode by clicking on the bug icon on the top right. 
+ +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/enter-debug-mode.png" + url="/images/pipeline/debug/enter-debug-mode.png" + alt="Switching to debug mode" + caption="Switching to debug mode" + max-width="60%" +%} + +You can restart a pipeline that has already finished in debug mode: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/restart-in-debug.png" + url="/images/pipeline/debug/restart-in-debug.png" + alt="Restart in debug mode" + caption="Restart in debug mode" + max-width="70%" +%} + +Now you are ready to place breakpoints in steps. + + +## Placing breakpoints + +Once the debugging mode is active, all pipeline steps will get an extra breakpoint icon on the far right of their box. + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/breakpoint.png" + url="/images/pipeline/debug/breakpoint.png" + alt="A step breakpoint" + caption="A step breakpoint" + max-width="70%" +%} + + +You can click on this icon and define a breakpoint for this particular step. You have the following options + +* *Before* - place a breakpoint before the step is initialized +* *Override* - place a breakpoint after the step has initialized but before its execution ([freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/)) +* *After* - place a breaking point after the step has finished execution. + +You can choose multiple debugging phases. In most cases the `Override` option is the most useful one. The `before` phase allows you to inspect +a pipeline step even before [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) are up. + +The `after` phase is useful if you want to verify files or variables after a step has finished its execution but before the next step starts. + + +## Using the debugger terminal + +Once the pipeline reaches a step that has a breakpoint, execution will pause and a new debugger terminal will become available: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/debug-window.png" + url="/images/pipeline/debug/debug-window.png" + alt="The debugging terminal" + caption="The debugging terminal" + max-width="60%" +%} + +You can now manually type commands to inspect your container. If your Codefresh plan has the basic debugging capabilities you can run the following commands: + +* `cd, ls` to see files +* `printenv` to see environment variables +* `cat` to read files +* `top` to see what is running +* `export` and [cf_export]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) to create environment variables +* `exit` to finish the debugging session + +If you have placed a breakpoint in the `override` phase of a freestyle step then the container image is the same as the one defined in the step. Therefore you can execute all tools that you have placed in the image (e.g. compilers, linters, test frameworks etc.) + +In all cases the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) is automounted so you can examine your source code or any other intermediate artifacts placed in your project folder or the pipeline cache. + +If the breakpoint is on a `before` or `after` phase, the command line terminal is powered by an [alpine](https://alpinelinux.org/) image. The image has already useful tools such as `wget`, `nc` and `vi`. 
If you have the advanced debugging capabilities in your Codefresh plan you can then install additional tools on your own directly in the terminal with [apk](https://wiki.alpinelinux.org/wiki/Alpine_Linux_package_management). Examples: + +* `apk add curl` +* `apk add nano` +* `apk add go` +* `apk add python` + +Use the command `apk search foo` to search for a package named foo. + + +## Resuming execution + +Once you are happy with your debugging session, click the continue button to resume. + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/resume-button.png" + url="/images/pipeline/debug/resume-button.png" + alt="Continue execution button" + caption="Continue execution button" + max-width="60%" +%} + +The pipeline will continue and then stop for the next breakpoint (if any). You can still revisit the debugger window for previous steps to see what debugging commands you had executed. + +>Notice that to conserve resources, there is a 15 minute limit on each open debug session. If you don't resume the pipeline within 15 minutes after hitting a breakpoint the whole pipeline will stop with a timeout error. + +It is important to understand that if you have chosen the `override` phase in a freestyle step, then the commands mentioned in the pipeline definition are completely ignored. + +## Using the alternative debug window + +If you enable the debugger on a freestyle step with the "override" option, Codefresh will install some extra tooling on the Docker image that is needed for the debugger itself. + +By default, the internal debugger tooling is using node.js, so if your image is already based on Node.js, you might get version conflicts in your application. + +You can enable an alternative debugger by passing the variable `DEBUGGER_RUNNER = 2` on the whole pipeline: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/alternative-debugger.png" + url="/images/pipeline/debug/alternative-debugger.png" + alt="Enabling the Python based debugger" + caption="Enabling the Python based debugger" + max-width="60%" +%} + +This debugger is based on Python instead of Node.js and it can work with both Python 2 and 3 Docker images. +This way the debugger tools will not affect your application. You can also use the same method in a specific freestyle step like this: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + hello_world_step: + title: freestyle step + image: node:11.1 + environment: + - 'DEBUGGER_RUNNER=2' +{% endraw %} +{% endhighlight %} + + + + + +## Inserting breakpoints in the pipeline definition + +It is also possible to mention breakpoints in the Codefresh YAML instead of using the UI. Breakpoints mentioned in the `codefresh.yml` file have no effect when the pipeline is not running in Debug mode. You need to run the pipeline in debug mode in order for them to stop the pipeline. + +Here is the syntax: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - test +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'codefresh-contrib/python-flask-sample-app' + revision: 'master' + git: github + stage: prepare + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-app-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile + debug: + phases: + before: true + after: false + MyUnitTests: + title: Running Unit tests + stage: test + image: '${{MyAppDockerImage}}' + debug: + phases: + before: false + override: true + after: false + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +Once you run this pipeline in debug mode, it will automatically have breakpoints in the respective steps (but you can still override/change them using the GUI). + + +## Troubleshooting + +The debugger windows needs some extra tools in a docker image in order to work (such as the `bash` shell). Codefresh automatically installs these tools on your image without any configuration. + +If you get the message *your linux distribution is not supported* please contact us so that we can examine your docker image and make sure it is compatible with the Codefresh debugger. + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/) diff --git a/_docs/pipelines/docker-image-metadata.md b/_docs/pipelines/docker-image-metadata.md new file mode 100644 index 000000000..e2afc96c9 --- /dev/null +++ b/_docs/pipelines/docker-image-metadata.md @@ -0,0 +1,226 @@ +--- +title: "Docker image metadata" +description: "How to use custom metadata in your Docker images" +group: pipelines +redirect_from: + - /docs/metadata-annotations/ + - /docs/docker-registries/metadata-annotations/ +toc: true +--- +Images built by Codefresh can be annotated with customized metadata. +This article explains how to create advanced view of your images and enrich them with custom metadata which perfectly fits your flow and image management process. + +{% + include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png" + url="/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png" + alt="Codefresh Docker registry metadata" + max-width="65%" +%} + +>We have since expanded this feature and now you are able to add custom annotations to [pipelines and builds as well]({{site.baseurl}}/docs/pipelines/annotations/). Notice also that the syntax shown in this page is deprecated but still supported. For the new syntax +see [Hooks in pipelines]({{site.baseurl}}/docs/pipelines/hooks/). + +## Metadata types + +Images built by Codefresh can be annotated with an array of key-value metadata. +Metadata values may be of the following types: + +{: .table .table-bordered .table-hover} +| Annotation type | Guidelines | Example | +| --------------- | ------------------------------------------------ | -------------------------------------------------------- | +| String | Use string | 'Example note' | +| Number | use numeric value to set this kind of annotation | 9999 | +| Boolean | Use true / false value | true | +| Percentage bar | use 0-100 value ending with % | 85% | +| Link | use url | {% raw %}`${{CF_COMMIT_URL}}`{% endraw %} | + +You can also use [Expression evaluations]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/#condition-expression-syntax) to set metadata. 
+ +## Annotate your images using Codefresh YAML +You can annotate an image as part of its build process and also on post build steps. + +{:.text-secondary} +### Build step Image Metadata Annotation +You can annotate an image as part of its build process by declaring the metadata value on the [Build step]({{site.baseurl}}/docs/pipelines/steps/build/): +1. The `metadata` attribute +2. The `set` operation +3. An array of key-value metadata + + `build-metadata-annotation` +{% highlight yaml %} +build_step: + type: build + ... + metadata: # Declare the metadata attribute + set: # Specify the set operation + - qa: pending + - commit_message: {% raw %}${{CF_COMMIT_MESSAGE}}{% endraw %} + - exit_code: 0 + - is_main: + evaluate: "{% raw %}'${{CF_BRANCH}}{% endraw %}' == 'main'" +{% endhighlight %} + +{:.text-secondary} +### Adding annotations to Built images on post-build steps +Any step in the YAML workflow can annotate built images by using [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). +To annotate a built image, configure any step with: +1. The post-step operation +2. The `metadata` attribute +3. The `set` operation +4. A list of target images with the variable syntax of {% raw %}`${{build_step_name.imageId}}`{% endraw %} +5. An array of key-value metadata + + `annotating-step` +{% highlight yaml %} +build_step: + type: build + ... + +any_step: + ... + on_success: # Execute only once the step succeeded + metadata: # Declare the metadata attribute + set: # Specify the set operation + - {% raw %}${{build_step.imageId}}{% endraw %}: # Select any number of target images + - qa: pending + + on_fail: # Execute only once the step failed + metadata: # Declare the metadata attribute + set: # Specify the set operation + - {% raw %}${{build_step.imageId}}{% endraw %}: # Select any number of target images + - exit_code: 1 + + on_finish: # Execute in any case + metadata: # Declare the metadata attribute + set: # Specify the set operation + - {% raw %}${{build_step.imageId}}{% endraw %}: # Select any number of target images + - is_main: + evaluate: "{% raw %}'${{CF_BRANCH}}'{% endraw %} == 'main'" +{% endhighlight %} + +### Example - Quality Image Metadata Annotation +You can set a quality indicator to images to show if they passed or failed tests. An image with the boolean annotation `CF_QUALITY` set to true will have a quality indicator in the 'Images' view. + + `YAML` +{% highlight yaml %} +version: '1.0' +steps: + build_step: + type: build + image_name: myrepo/imagename + working_directory: ./ + dockerfile: Dockerfile + + unit_test: + image: {% raw %}'${{build_step}}'{% endraw %} + working_directory: IMAGE_WORK_DIR + commands: + - echo test + on_success: + metadata: + set: + - {% raw %}'${{build_step.imageId}}'{% endraw %}: + - CF_QUALITY: true + on_fail: + metadata: + set: + - {% raw %}'${{build_step.imageId}}'{% endraw %}: + - CF_QUALITY: false +{% endhighlight %} + +Image quality has 3 indicators: +* True - this image is considered a quality image (ex. passed tests), +* False - this image is not considered a quality image (ex. when tests failed but the image was already built). +* No value (nobody set the annotation) - this image has no quality indicator. 
+ +{% include image.html lightbox="true" file="/images/pipeline/docker-image/quality-image-annotation.png" url="/images/pipeline/docker-image/quality-image-annotation.png" caption="Quality image annotation" max-width="40%" %} + + +## Viewing Image Metadata Annotations +You can view an image's metadata annotation by: +1. Navigating to the `Images` view +2. Selecting the target image +3. Selecting the `Annotations` tab + +{% + include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png" + url="/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png" + alt="Image annotations" + caption="Image annotations" + max-width="65%" +%} + +In addition, you can add selected annotations to the images table on images page. To display an annotation in the image table, click on the gear icon at the top right corner of image page and then select all annotations you want to display. + +{% + include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/docker-image-metadata/annotations-image-table.png" + url="/images/pipeline/codefresh-yaml/docker-image-metadata/annotations-image-table.png" + alt="Annotations in image table" + caption="Annotations in image table" + max-width="65%" +%} + + +## Annotating images programmatically + +It is also possible to annotate images with the [Codefresh CLI](https://codefresh-io.github.io/cli/). + +First find the id of an image that you wish to annotate with the command + +``` +codefresh get images +``` + +You can also search for a specific image by name: + +``` +$ codefresh get images --image-name custom +ID NAME TAG CREATED SIZE PULL +b5f103a87856 my-custom-docker-image bla Fri Feb 01 2019 91.01 MB r.cfcr.io/kostis-codefresh/my-custom-docker-image:bla +``` +Then once you have the ID of the image you can use the [annotate command](https://codefresh-io.github.io/cli/images/annotate-image/) to add extra metadata: + +``` +codefresh annotate image b5f103a87856 -l coverage=75 +``` + +## Using custom metadata in Codefresh pipelines + +You can also use the Codefresh CLI to fetch existing metadata from images. It is then very easy to extract and process specific fields with [yq](https://github.com/kislyuk/yq) + +Here is an example +``` +$ codefresh get image b5f103a87856 --output=yaml | yq -r .annotations.coverage +75 +``` + +You can then easily process the metadata (e.g. with scripts) and take decisions according to them. Here is an example +step that will fail the build if test coverage on an image is less than 80% + + `YAML` +{% highlight yaml %} +version: '1.0' +steps: + findLabel: + title: Get image label for coverage + image: codefresh/cli + commands: + - export MY_COVERAGE=$(codefresh get image b5f103a87856 --output=yaml | yq -r .annotations.coverage) + - echo "Coverage is $MY_COVERAGE" + - if [[ $MY_COVERAGE -lt "80" ]]; then exit 1 ; fi + +{% endhighlight %} + +The possibilities are endless as you can take any combination of image metadata and use any complex conditional +in order to process them in a Codefresh pipeline. 
+ + +## Related articles +[External Docker Registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +[Accessing a Docker registry from your Kubernetes cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/access-docker-registry-from-kubernetes/) diff --git a/_docs/pipelines/docker-operations.md b/_docs/pipelines/docker-operations.md deleted file mode 100644 index 4678a46d4..000000000 --- a/_docs/pipelines/docker-operations.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Using Docker" -description: "" -group: pipelines -toc: true ---- - -Coming soon diff --git a/_docs/pipelines/hooks.md b/_docs/pipelines/hooks.md new file mode 100644 index 000000000..9460bc536 --- /dev/null +++ b/_docs/pipelines/hooks.md @@ -0,0 +1,634 @@ +--- +title: "Hooks in pipelines" +description: "Execute commands before/after each pipeline or step" +group: pipelines +toc: true +--- + +Hooks in pipelines allow you to run specific actions at the end and the beginning of the pipeline, as well as before/after a step. + +Hooks can be a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/), as you need to define: + +1. A Docker image that will be used to run specific commands. +1. One or more commands to run within the context of that Docker image. + +For simple commands we suggest you use a small image such as `alpine`, but any Docker image can be used in hooks. + +## Pipeline hooks + +Codefresh allows you to run a specific step before each pipeline as well as after it has finished. + +### Running a step at the end of the pipeline + +You can easily run a step at the end of pipeline, that will execute even if one of the steps have failed (and thus the pipeline is stopped in middle): + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "cleanup after end of pipeline" + +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Hello world" + step2: + title: "Step 2" + type: "freestyle" + image: node:10-buster + commands: + - echo "There was an error" + - exit 1 +{% endraw %} +{% endhighlight %} + +In the example above we define a hook for the whole pipeline that will run a step (the `exec` keyword) inside `alpine:3.9` and will simply execute an `echo` command. Because we have used the `on_finish` keyword, this step will execute even if the whole pipeline fails. + +This scenario is very common if you have a cleanup step or a notification step that you always want to run at the end of the pipeline. You will see the cleanup logs in the top pipeline step. + + {% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/hooks/cleanup-step.png" +url="/images/pipeline/codefresh-yaml/hooks/cleanup-step.png" +alt="Running a cleanup step" +caption="Running a cleanup step" +max-width="80%" +%} + +Apart from the `on_finish` keyword you can also use `on_success` and `on_fail` if you want the step to only execute according to a specific result of the pipeline. 
It is also possible to use multiple hooks at the same time: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "cleanup after end of pipeline" + on_success: + exec: + image: alpine:3.9 + commands: + - echo "Send a notification only if pipeline was successful" + on_fail: + exec: + image: alpine:3.9 + commands: + - echo "Send a notification only if pipeline has failed" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Hello world" + step2: + title: "Step 2" + type: "freestyle" + image: node:10-buster + commands: + - echo "There was an error" + - exit 1 #Comment this line out to see how hooks change + +{% endraw %} +{% endhighlight %} + +Note that if you have multiple hooks like the example above, the `on_finish` segment will always execute after any `on_success`/`on_fail` segments (if they are applicable). + + +### Running a step at the start of the pipeline + +Similar to the end of the pipeline, you can also execute a step at the beginning of the pipeline with the `on_elected` keyword: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "Creating an adhoc test environment" + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "Destroying test environment" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" + step2: + title: "Step 2" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running acceptance tests on test environment" + +{% endraw %} +{% endhighlight %} + +All pipeline hooks will be shown in the "initializing process" logs: + + {% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/hooks/before-pipeline.png" +url="/images/pipeline/codefresh-yaml/hooks/before-pipeline.png" +alt="Hooks before a pipeline" +caption="Hooks before a pipeline" +max-width="80%" +%} + +It is possible to define all possible hooks (`on_elected`, `on_finish`, `on_success`, `on_fail`) in a single pipeline, if this is required by your development process. + +## Step hooks + +Hooks can also be defined for individual steps inside a pipeline. This capability allows for more granular control on defining prepare/cleanup phases for specific steps. + +The syntax for step hooks is the same as pipeline hooks (`on_elected`, `on_finish`, `on_success`, `on_fail`), you just need to put the respective segment under a step instead of the root of the pipeline. + +For example, this pipeline will always run a cleanup step after integration tests (even if the tests themselves fail). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Compile application" + type: "freestyle" + image: node:10-buster + commands: + - echo "Building application" + step2: + title: "Unit testing" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running unit tests" + hooks: + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "Create test report" + step3: + title: "Uploading artifact" + type: "freestyle" + image: node:10-buster + commands: + - echo "Upload to artifactory" +{% endraw %} +{% endhighlight %} + + +Logs for steps hooks are shown in the log window of the step itself. 
+ + {% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/hooks/step-after.png" +url="/images/pipeline/codefresh-yaml/hooks/step-after.png" +alt="Hooks before a pipeline" +caption="Hooks before a pipeline" +max-width="80%" +%} + +As with pipeline hooks, it is possible to define multiple hook conditions for each step. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Compile application" + type: "freestyle" + image: node:10-buster + commands: + - echo "Building application" + step2: + title: "Security scanning" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Security scan" + hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "Authenticating to security scanning service" + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "Uploading security scan report" + on_fail: + exec: + image: alpine:3.9 + commands: + - echo "Sending slack notification" + +{% endraw %} +{% endhighlight %} + +The order of events in the example above is the following: + +1. The `on_elected` segment executes first (authentication) +1. The step itself executes (the security scan) +1. The `on_fail` segment executes (only if the step throws an error code) +1. The `on_finish` segment always executes at the end + + +## Running steps/plugins in hooks + +Hooks can use [steps/plugins](https://steps.codefresh.io). With plugins you have to specify: + +- The type field for the step/plugin. +- The arguments needed for the step/plugin. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +hooks: #run slack-notifier hook on build completion + on_finish: + steps: + exec: + type: slack-notifier + arguments: + SLACK_HOOK_URL: '${{SLACK_WEBHOOK_URL}}' + SLACK_TEXT: '${{SLACK_TEXT}}' + +steps: + step1: + title: "Freestyle step" + type: "freestyle" + image: alpine + commands: + - echo "Codefresh" + hooks: #run slack-notifier hook on step completion + on_finish: + steps: + exec: + type: slack-notifier + arguments: + SLACK_HOOK_URL: '${{SLACK_WEBHOOK_URL}}' + SLACK_TEXT: '${{SLACK_TEXT}}' +{% endraw %} +{% endhighlight %} + +## Controlling errors inside pipeline/step hooks + +By default if a step fails within a pipeline, the whole pipeline will stop and be marked as failed. +This is also true for `on_elected` segments as well. If they fail, then the whole pipeline will fail (regardless of the position of the segment in a pipeline or step). However, this only applies to `on_elected` segments. +`on_success`, `on_fail` and `on_finish` segments do not affect the pipeline outcome at all, and a pipeline will continue even if one of these segments fails. + +For example the following pipeline will fail right away, because the pipeline hook fails at the beginning. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "failing on purpose" + - exit 1 +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" +{% endraw %} +{% endhighlight %} + +You can change this behavior by using the existing [fail_fast property]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#execution-flow) inside an `on_elected` hook. 
+ +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + fail_fast: false + commands: + - echo "failing on purpose" + - exit 1 +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" +{% endraw %} +{% endhighlight %} + +This pipeline will now execute successfully and `step1` will still run as normal, because we have used the `fail_fast` property. You can also use the `fail_fast` property on step hooks as well: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" + hooks: + on_elected: + exec: + image: alpine:3.9 + fail_fast: false + commands: + - echo "failing on purpose" + - exit 1 +{% endraw %} +{% endhighlight %} + + +>Notice that the `fail_fast` property is only available for `on_elected` hooks. The other types of hooks (`on_finish`, `on_success`, `on_fail`) do not affect the outcome of the pipeline in any way. Even if they fail, the pipeline will continue running to completion. This behavior is not configurable. + + +## Using multiple steps for hooks + +In all the previous examples, each hook was a single step running on a single Docker image. You can also define multiple steps for each hook. This is possible by inserting an extra `steps` keyword inside the hook and listing multiple Docker images under it: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: + steps: + mycleanup: + image: alpine:3.9 + commands: + - echo "echo cleanup step" + mynotification: + image: cloudposse/slack-notifier + commands: + - echo "Notify slack" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" +{% endraw %} +{% endhighlight %} + +By default all steps in a single hook segment are executed one after the other. But you can also run them in [parallel]({{site.baseurl}}/docs/pipelines/advanced-workflows/#inserting-parallel-steps-in-a-sequential-pipeline): + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Compile application" + type: "freestyle" + image: node:10-buster + commands: + - echo "Building application" + step2: + title: "Unit testing" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests" + - exit 1 + hooks: + on_fail: + mode: parallel + steps: + upload-my-artifact: + image: maven:3.5.2-jdk-8-alpine + commands: + - echo "uploading artifact" + my-report: + image: alpine:3.9 + commands: + - echo "creating test report" +{% endraw %} +{% endhighlight %} + +You can use multiple steps in a hook in both the pipeline and the step level. + + +## Using annotations and labels in hooks + +The hook syntax can also be used as a unified interface for encompassing the existing syntax of [build annotations]({{site.baseurl}}/docs/pipelines/annotations/) and [metadata]({{site.baseurl}}/docs/pipelines/docker-image-metadata/). 
+ +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "Creating an adhoc test environment" + annotations: + set: + - entity_type: build + annotations: + - my_annotation_example1: 10.45 + - my_string_annotation: Hello World +steps: + clone: + title: Cloning source code + type: git-clone + arguments: + repo: 'codefresh-contrib/golang-sample-app' + revision: master + build-image: + type: build + image_name: my-golang-image + working_directory: '${{clone}}' + tag: master + hooks: + on_success: + exec: + image: alpine:3.9 + commands: + - echo "Scanning docker image" + metadata: # setting metadata + set: + - '${{build-image.imageId}}': + - status: 'Success' +{% endraw %} +{% endhighlight %} + +Note however, that if you decide to use annotations and metadata inside hooks, you cannot mix and max the old syntax with the new syntax. + +The following pipeline is **NOT** valid: + +`invalid-codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + test: + image: alpine + on_success: # you cannot use old style together with hooks + annotations: + set: + - entity_type: build + annotations: + - status: 'success' + commands: + - echo block + hooks: + on_success: + annotations: + set: + - entity_type: build + annotations: + - status: 'success' +{% endraw %} +{% endhighlight %} + +The pipeline is not correct, because the first segment of annotations is directly under `on_success` (the old syntax), while the second segment is under `hooks/on_success` (the new syntax). + + +## Syntactic sugar syntax + +To simplify the syntax for hooks, the following simplifications are also offered: + +If you do not want to use metadata or annotations in your hook the keyword `exec` can be omitted: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: # no exec keyword + image: notifications:master + commands: + - ./send_workflow_finished.js +steps: + build: + type: build + image_name: my_image + tag: master + hooks: + on_fail: # no exec keyword + image: notifications:master + commands: + - ./send_build_failed.js +{% endraw %} +{% endhighlight %} + + +If you do not want to specify the Docker image you can simply omit it. Codefresh will use the `alpine` image in that case to run the hook: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: # no image keyword - alpine image will be used + - echo "Pipeline starting" +steps: + build: + type: build + image_name: my_image + tag: master + hooks: + on_success: # no image keyword - alpine image will be used + exec: + - echo "Docker image was built successfully" + annotations: + set: + - entity_type: build + annotations: + - status: 'Success' +{% endraw %} +{% endhighlight %} + + + If you don't use metadata or annotations, you can also completely remove the `exec` keyword and just mention the commands you want to run (`alpine` image will be used by default): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: # no exec/image keyword - alpine image will be used + - echo "Pipeline starting" +steps: + build: + type: build + image_name: my_image + tag: master + hooks: + on_success: # no exec/image keyword - alpine image will be used + - echo "Docker image was built successfully" +{% endraw %} +{% endhighlight %} + +## Using Type Steps / Plugins in hooks + +You can use a type step / plugins in hooks. 
With this you will need to change `exec` into `steps` with the information needed for the step. + +Below is an example pipeline hook using the `slack-notifier` step/plugin for when the pipeline starts. + +```yaml +hooks: + on_elected: + steps: + exec: + slack_pending: + type: slack-notifier + arguments: + SLACK_HOOK_URL: {% raw %}'${{SLACK_WEBHOOK_URL}}'{% endraw %} + SLACK_TEXT: '*Build Started* :crossed_fingers:' +``` + +## Limitations of pipeline/step hooks + +With the current implementation of hooks, the following limitations are present: + +* The [debugger]({{site.baseurl}}/docs/pipelines/debugging-pipelines/) cannot inspect commands inside hook segments +* Hooks are not supported for [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +* Storage integrations don't resolve in hooks (for example, [test reports]({{site.baseurl}}/docs/testing/test-reports/#producing-allure-test-reports-from-codefresh-pipelines)) +* Step hook does not support the working_directory field aka `working_directory: ${{clone}}` + +## Related articles +[Conditional execution of steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) +[Working Directories]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Annotations in CI pipelines]({{site.baseurl}}/docs/pipelines/annotations/) + + diff --git a/_docs/pipelines/introduction-to-codefresh-pipelines.md b/_docs/pipelines/introduction-to-codefresh-pipelines.md new file mode 100644 index 000000000..6369bb74a --- /dev/null +++ b/_docs/pipelines/introduction-to-codefresh-pipelines.md @@ -0,0 +1,336 @@ +--- +title: "Introduction to Codefresh pipelines" +description: "Understand how Codefresh pipelines work" +group: pipelines +redirect_from: + - /docs/introduction-to-codefresh-pipelines/ + - /docs/configure-ci-cd-pipeline/ +toc: true +--- + + +The central component of the Codefresh platform for continuous integration (CI) are pipelines. Pipelines are workflows that contain individual steps, with each step responsible for a specific action in the CI process. + +Use CI pipelines to: + +* Compile and package code +* Build Docker images +* Push Docker images to any [Docker Registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +* Deploy applications/artifacts to VMs, Kubernetes clusters, FTP sites, S3 buckets etc. +* Run [unit tests]({{site.baseurl}}/docs/testing/unit-tests/), [integration tests]({{site.baseurl}}/docs/testing/integration-tests/), acceptance tests etc. +* Any custom action that you define + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png" +url="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png" +alt="Codefresh CI pipelines" +caption="Codefresh CI pipelines" +max-width="90%" +%} + +## Why are Codefresh CI pipelines different? + +Codefresh offers unique characteristics in CI pipelines that serve as the cornerstone of the build/deploy process: + +1. All [steps]({{site.baseurl}}/docs/pipelines/steps/) in Codefresh pipelines are executed inside a Docker container of your choosing. +1. All steps in Codefresh share the same "workspace" in the form of a shared Docker volume. +1. The shared Docker volume is automatically cached between pipeline executions. +1. Every successful pipeline automatically pushes its Docker image to the default Docker registry defined in your account. +1. Codefresh has a distributed Docker cache for all build nodes and caches layers similar to the docker daemon on your workstation. 
This is fully automated, and does not need to be configured to activate it. + +### Using Docker containers as build tooling + +Unlike traditional solutions, Codefresh was built from the ground up to have full Docker support. All Codefresh pipelines +deal with Docker images, either using them as runtime tools or creating them as deployment artifacts. +Everything that happens in Codefresh uses a Docker image behind the scenes. + +It is important that you understand how to take advantage of Docker-based pipelines as they are much more powerful than +traditional VM solutions. The capability to define your own tooling cannot be overstated. It is the fastest way to take +full control of your build tools and to upgrade them easily. + +With traditional VM-based build solutions, you are constrained to the build and deployment tools provided by the vendor. +If, for example, you need a new version of Node/Java/Python other than the one that is provided on the build slave, you have to wait for your vendor to add it. If you need to use a special tool (e.g. terraform, gcloud) and the vendor does +not support it, you are out of luck. + +With Codefresh you don't have to worry about what is installed in the Runners that execute your builds. They can run *any* Docker image of your choosing. You are free to update the version of the image used at any given time. + +Here is an example: + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/steps-example1.png" +url="/images/pipeline/introduction/steps-example1.png" +alt="Pipeline with three steps" +caption="Pipeline with three steps" +max-width="70%" +%} + + +1. The first step runs under the context of a Node image that prepares the application and runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/). +1. The second step uses an image with s3 command line tools and uploads the test results to a bucket that holds test reports. +1. The helm step creates a Helm chart and pushes it to a Helm repository. + +You don't need to contact Codefresh and ask them to add the S3 executable on the build runners. You just use a prebuilt Docker image that contains it. The version used for Node is defined by you, and if you wish to upgrade to another version +you simply change the definition of the pipeline. + + +Here is another example: + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/steps-example2.png" +url="/images/pipeline/introduction/steps-example2.png" +alt="Codefresh steps example 2" +caption="Pipeline with 4 steps" +max-width="70%" +%} + +1. The first step runs under the context of a Maven image that compiles the code and creates an executable. +1. The second step uses a Docker image that contains terraform and creates a single ECS instance in AWS. +1. The third step uses a custom Docker image that deploys to the ECS container that was just created. +1. The last step uploads the Maven reports that were created in step 1 to an FTP site. + +You should now start seeing the endless possibilities. You can mix and match any Docker image (either a public one +or your own) to use as a build context in your step. This makes Codefresh a future-proof solution for all build tools +that exist now and all of them that will appear in the future. As long as there is a Docker image for a tool, Codefresh +can use it in a pipeline without any extra configuration. + +Codefresh also offers a [marketplace](https://codefresh.io/steps/){:target="\_blank"} with several existing plugins.
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/plugin-directory.png" +url="/images/pipeline/plugin-directory.png" +alt="Codefresh steps directory" +caption="Codefresh steps directory" +max-width="80%" +%} + + +All plugins in the marketplace are open-source, and we accept external contributions so you can easily add your own. + + +### Sharing the workspace between build steps + +We have seen in the previous section that Codefresh can use Docker images as the context of a build step. The second +important point to understand regarding Codefresh CI pipelines is that the default workspace of each step is shared between all steps in a pipeline. + +This happens via a Docker volume which is attached to all Docker containers that represent each step. This volume is +always available at `/codefresh/volume`, and is used as the parent folder where the project is cloned. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume.png" +url="/images/pipeline/introduction/codefresh-volume.png" +alt="Codefresh volume" +caption="All steps share the same volume" +max-width="90%" +%} + +Anything in this volume is available to all steps of the pipeline (as well as to subsequent executions of the same pipeline as we will see later). + +Again, this places Codefresh ahead of traditional solutions that execute build steps in a completely isolated manner. +In traditional VM-based builds, using artifacts produced from one step in another step, is a complicated process as one +must declare which artifact folders should be re-used. Artifact re-use sometimes happens with compression/decompression +of the respective folder resulting in really slow builds if a project is very big. + +Codefresh does not need to bother the user with artifact reuse across steps. *Anything* that is placed in the shared Codefresh volume will automatically be available to the next steps in the pipeline without any extra configuration. + +Example 1 + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume-example1.png" +url="/images/pipeline/introduction/codefresh-volume-example1.png" +alt="Codefresh volume example 1" +caption="Re-using Node Modules" +max-width="90%" +%} + +1. The first step runs `npm install` and downloads all libraries in `node_modules` into the shared Codefresh volume. +1. The second step runs `npm test`. The folder `node_modules` is still present from the previous step. + +Example 2 + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume-example2.png" +url="/images/pipeline/introduction/codefresh-volume-example2.png" +alt="Codefresh volume example 2" +caption="Re-using Test reports" +max-width="90%" +%} + +1. The first step runs `mvn test` and produces some test reports in `target/surefire-reports` into the shared Codefresh volume. +1. The next step uploads these reports using FTP to an external site. + + +The common volume shared among build steps makes it very easy to create pipelines that work in a gradual manner +where any step in the pipeline is using artifacts produced by a previous one. + +>The shared volume is **NOT available** in [build steps]({{site.baseurl}}/docs/pipelines/steps/build/). This is not a Codefresh limitation. Docker itself [does not allow volumes during builds](https://github.com/moby/moby/issues/14080){:target="\_blank"}. There is no folder `/codefresh/volume` inside a Dockerfile for you to access. 
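+
+To make this concrete, here is a minimal sketch (the step names and the Node image are chosen only for this example) in which the second step relies on nothing but the shared volume to reuse the `node_modules` folder created by the first step:
+
+```
+version: '1.0'
+steps:
+  install_dependencies:
+    title: Install dependencies
+    image: node:10-buster
+    commands:
+      - npm install   # writes node_modules to the shared volume
+  run_tests:
+    title: Run unit tests
+    image: node:10-buster
+    commands:
+      - npm test      # node_modules from the previous step is already present
+```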
+ +You can also use [environment variables]({{site.baseurl}}/docs/pipelines/variables/) to share information between steps. All predefined environment variables are available to all steps, and each individual step can use `cf_export` to dynamically inject extra environment variables during the build process. + + +## Working with Codefresh pipelines + +Now that we know the basics, we can see how you can take advantage of Docker-based pipelines in order to build and deploy your projects. + + +### Cloning the source code + +You can clone source code using the built-in [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) as the first step in a CI pipeline, or manually run your own Git clone commands in a freestyle step. Codefresh has built-in [Git integration]({{site.baseurl}}/docs/integrations/git-providers/) with all popular git providers (both cloud and on-premises installations). + +Codefresh uses the shared volume as the parent folder of the project. So if your pipeline is connected to a Git repo that contains `my-project` the following will happen: + +* `/codefresh/volume` is the shared directory for all steps +* `/codefresh/volume/my-project` is where the source code exists. This is also the current working directory +* Any other directory (e.g. `/bin`, `/var`, `/opt`) depends on the current container image that is used as build context + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/checkout.png" +url="/images/pipeline/introduction/checkout.png" +alt="Codefresh checkout folder" +caption="Codefresh checkout folder" +max-width="80%" +%} + +There are three important points to consider regarding these folders: + +1. The [working directory]({{ site.baseurl }}/docs/pipelines/what-is-the-codefresh-yaml/#working-directories) of each step is by default the project folder (e.g. `/codefresh/volume/my-project`). Therefore +your build step can run commands exactly as you would run them locally (e.g. `npm install, pip install, mvn package, bundle install`). + +1. Notice that the project folder is placed on the Codefresh volume, so by default it is also available to all other steps. The code that you check out in the beginning, as well as all other files that are created on it, are available to all steps. Once you create `node_modules`, or any other folder that exists inside the project folder, it will automatically persist for all other steps. + +1. Finally, `/codefresh/volume` is an internal folder name, and you should use `{% raw %}${{CF_VOLUME_PATH}}{% endraw %}` in your codefresh.yml file +if you really want to reference this folder. You can also reference your project folder as `{% raw %}${{CF_VOLUME_PATH}}/${{CF_REPO_NAME}}{% endraw %}` if you need it. + +See the [System Provided Variables]({{site.baseurl}}/docs/pipelines/variables/#system-provided-variables) section for more information. + +### Working with Docker inside a Codefresh pipeline + +We have already seen that Codefresh pipelines are based on Docker images and that each step runs inside the context of a Docker container. You might be wondering how you can run Docker commands directly inside a Codefresh pipeline. + +The answer is that you don't. Even though in the future Codefresh might allow for Docker-in-Docker capabilities, at the moment this is not supported for security reasons (only enterprise customers have access to the underlying Docker daemon). Any scripts that you already have that run Docker commands on their own will need to be adapted to Codefresh pipelines. 
+ +Usually you want to run a docker command for four reasons: + +1. To build a Docker image +1. To push a Docker image +1. To run a docker-compose setup +1. To run a Docker container + +For all these situations Codefresh gives you special pipeline steps that perform the respective action. These are: + +1. The [build step]({{site.baseurl}}/docs/pipelines/steps/build/) +1. The [push step]({{site.baseurl}}/docs/pipelines/steps/push/) +1. The [compositions step]({{site.baseurl}}/docs/pipelines/steps/composition/) +1. The [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + +The commands you define in a freestyle step run automatically in a Docker container that is attached to that step once the pipeline executes. + +Therefore, this command on your local workstation: + +``` +docker run python:3.6.4-alpine3.6 pip install . +``` + +will become in Codefresh + +``` +CollectAllMyDeps: + title: Install dependencies + image: python:3.6.4-alpine3.6 + commands: + - pip install . +``` +For the plugins in the [Step Marketplace](https://codefresh.io/steps/) we already give an example of the YAML part that must be included in your pipeline: + +{% include +image.html +lightbox="true" +file="/images/pipeline/plugin-example.png" +url="/images/pipeline/plugin-example.png" +alt="Codefresh steps directory" +caption="Codefresh steps directory" +max-width="50%" +%} + +Each plugin also defines its input/output in the form of environment variables and files. + +### Creating Docker images dynamically as build tools + + +Now we reach one of the most powerful features of Codefresh pipelines. We have already seen that [freestyle pipeline steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) are just a series of commands that run inside the context of a Docker container. In most cases the images used +for the freestyle steps are known in advance and come from public (e.g. Dockerhub) or [private Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). + +Codefresh is one the few CI/CD solutions that not only offers easy Docker registry integration + accessible to all pipelines +but also allows you to **build docker images on demand in the same pipeline where they are required**. + +This means that you can create a special Docker image in an early step inside a Codefresh pipeline and then reference it in a later step in the same pipeline. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/dynamic-docker-builds.png" +url="/images/pipeline/introduction/dynamic-docker-builds.png" +alt="Codefresh dynamic docker builds" +caption="Creating dynamically Docker images as build steps" +max-width="90%" +%} + +Let's say for example that you are moving a legacy application to Codefresh which is deployed using a special Python script. Your main application is a Ruby-On-Rails app. Both applications exist in the same git repository (we mention this for simplicity reasons, Codefresh also supports checking out code from multiple repositories). + +You can create a single pipeline with Codefresh that does the following: + +1. Checks out the code +1. Creates a Docker image based on Python for the deployment tool +1. Uploads the Python tool Docker image to the internal registry +1. Builds the Ruby application using a freestyle step with the R-O-R image from Dockerhub +1. 
Deploys the Ruby application by running the Python based deployment tool image (after pulling it first) + +This concept is ground-breaking as it allows you to automatically update your build tools that are used in any pipeline. +Even though you could manually create the Docker images yourself before-hand, it is better to completely automate them +inside the pipeline they are actually needed. This ensures that both the application and its tooling are always at the latest version. + +### How caching works in Codefresh + +Codefresh employs several caching mechanisms for both Dockerized and non-dockerized applications. The shared volume is also cached behind the scenes automatically. See our [caching guide]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipeline-caching/) for more details. + +### Calling other pipelines + +It is also possible to chain multiple pipelines together in Codefresh. To accomplish this, Codefresh offers +a special Docker image that contains the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} and allows you to trigger another pipeline using its name. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/call-pipeline.png" +url="/images/pipeline/introduction/call-pipeline.png" +alt="Codefresh call pipeline" +caption="Calling another pipeline" +max-width="80%" +%} + +Notice that each pipeline in Codefresh is completely isolated from the other. They use a different Docker volume so the build context of each one cannot access files from the other. This may change in the future, but for the time being +you should know that only steps within the same pipeline can share artifacts. + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Build and Docker caching]({{site.baseurl}}/docs/pipelines/pipeline-caching/) + + + diff --git a/_docs/pipelines/marketplace.md b/_docs/pipelines/marketplace.md deleted file mode 100644 index 4295f74ee..000000000 --- a/_docs/pipelines/marketplace.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Codefresh marketplace" -description: "" -group: pipelines -toc: true ---- - -The Codefresh Hub for Argo documentation can be found in its [official repository](https://github.com/codefresh-io/argo-hub). - -Codefresh is fully backing this project and will help all developers that want to contribute to succeed. - -You can find documentation about how to contribute to the argo hub in the [official repository contribute section](https://github.com/codefresh-io/argo-hub#How-to-Contribute) - diff --git a/_docs/pipelines/monitoring-pipelines.md b/_docs/pipelines/monitoring-pipelines.md new file mode 100644 index 000000000..3f9f4426e --- /dev/null +++ b/_docs/pipelines/monitoring-pipelines.md @@ -0,0 +1,472 @@ +--- +title: "Monitoring pipelines" +description: "Viewing your builds and logs" +group: pipelines +toc: true +--- + + +All pipeline activity in Codefresh can be viewed in the *Builds* tab. +* The global build view shows builds for all projects across your organization +* The project-based view from the settings inside an individual project shows the builds for the selected project + +Both views have the same controls and filters. + +## Viewing pipeline status + +Each screen contains all builds sorted from the most recent to the oldest. The first time you visit +the screen there are no filters defined. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/builds-dashboard.png" +url="/images/pipeline/monitoring/builds-dashboard.png" +alt="Pipeline Activity in Codefresh" +caption="Pipeline activity" +max-width="80%" +%} + +By default, it shows all builds that are happening in Codefresh. To narrow the list you can use the filters at the top +of the screen. + +### Applying filters on the build view + +Directly above the list you can find several filters. + +At the most basic level you can choose between + + * *Running* builds that are currently executing + * *Pending* builds which are queued and waiting to start + * *Delayed* builds which cannot run yet because there are no free pipeline builders. + A build can be delayed for a maximum of seven days, and each account can have up to 1000 delayed builds at any time. + * Builds that are delayed for more than seven days are terminated with a _Delay time limit exceeded_ reason. + * If the total number of delayed builds exceeds 1000, older builds are terminated with a _Maximum delayed workflows exceeded_ reason. + + * *All* builds regardless of running stage (this is the default) + +You can further filter the builds by choosing the various filter types that specify the build job. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-filtering.png" +url="/images/pipeline/monitoring/build-filtering.png" +alt="Pipeline filters in Codefresh" +caption="Available filters" +max-width="50%" +%} + +The available filters are: + +* *Pipeline* - any of the pipelines available. +* *Provider* - type of [Git provider]({{site.baseurl}}/docs/integrations/git-providers/). +* *Repository* - Git repository from the attached [trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/). +* *Type* - build, [launch a test environment]({{site.baseurl}}/docs/getting-started/on-demand-environments/#launching-a-docker-image-using-codefresh). +* *Branch* - any of the available branches from the attached Git trigger. +* *Committer* - person that made the commit that triggered the build. +* *Environment* - which [environment]({{site.baseurl}}/docs/deploy-to-kubernetes/environment-dashboard/) was affected. +* *Status* - success, error, in-progress, pending, terminated, etc. A Pending status can also indicate that [pipeline build execution has been paused]({{site.baseurl}}/docs/administration/pipeline-settings/#pause-pipeline-executions) for the account. +* *Trigger type* - what type of trigger was responsible for this build. +* *Git event* - in the case of [git triggers]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/), the exact event. + +Notice that all filters are multiple-choice so you can select multiple values for each filter category. +At any given point you can see all the active filters at the top of the screen. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/possible-filters.png" +url="/images/pipeline/monitoring/possible-filters.png" +alt="Pipeline filters in Codefresh" +caption="Active filters" +max-width="50%" +%} + +You can easily remove active filters by clicking on them and adding/removing values.
+ +On the right hand side you can also find a filtering toolbar with time options: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-filter-date.png" +url="/images/pipeline/monitoring/build-filter-date.png" +alt="Filtering options for time" +caption="Filtering options for time" +max-width="60%" +%} + +You can combine all previously mentioned filters with the time based filters. + +### Creating build views + +Once you have a set of filters that you use regularly, you can save them as a custom *Build View* by clicking the *Save as View* button +and providing a name. + +Now you can select at the top of the window any of the available build views to automatically filter results according to the respective sets of filters. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-view-selection.png" +url="/images/pipeline/monitoring/build-view-selection.png" +alt="Build View selection" +caption="Build View selection (click to enlarge)" +max-width="50%" +%} + +You can delete existing build-views by clicking on the *manage views* button. +You can change the filters of an existing build view by making a new filter selection and then saving the view with an existing name (effectively overwriting it). + + +### Build details + + +For each individual build you can see several details such as the git hash, the person who made the commit, the pipeline that was triggered as well as how much time it took. For each event type you will also see additional context related information. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-details-entry.png" +url="/images/pipeline/monitoring/build-details-entry.png" +alt="build details in Codefresh" +caption="Build details" +max-width="100%" +%} + +Child builds triggered by other builds are identified in the Event column by the icon {::nomarkdown} {:/}. +The Parent Build column shows the link to the parent build. Mouse over to see the tooltip with information on the parent build. The tooltip includes links to the parent build, repo, branch, commit message, and the ability to filter by repo and branch. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/child-parent-build-info.png" +url="/images/pipeline/monitoring/child-parent-build-info.png" +alt="Child build in Builds list" +caption="Child build in Builds list" +max-width="70%" +%} + +There are also extra options if you click the small "3-dot" menu button on the right. For a particular build, you can: + +- View the logs +- View the YAML +- View or add [annotations]({{site.baseurl}}/docs/pipelines/annotations/) +- View the images produced (and consequently launch an on-demand [test environment]({{site.baseurl}}/docs/getting-started/on-demand-environments/#launching-a-docker-image-using-codefresh)) + +Notice that if you restart a pipeline it will trigger with the exact settings it *originally* had. So +if this was a manual trigger where you [disabled caching]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) or changed the [notification options](#monitoring-pipelines-that-check-pull-requests), the new +execution will still honor those settings (even if you have changed them for later builds). + +An extra button for test reports will be visible if you are using the [test report feature]({{site.baseurl}}/docs/testing/test-reports/) of Codefresh. 
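+
+If you prefer the terminal, recent builds can also be listed with the [Codefresh CLI](https://codefresh-io.github.io/cli/). The flags available depend on your CLI version, but the basic command is:
+
+```
+codefresh get builds
+```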
+ + +## Viewing details for an individual pipeline build + +If you click on any individual pipeline build, you will enter the pipeline build information screen. +From here you can see more details for a build such as the logs, running time and resource metrics. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/pipeline-view.png" +url="/images/pipeline/monitoring/pipeline-view.png" +alt="Pipeline view" +caption="Pipeline view" +max-width="80%" +%} + +Each section in this screen corresponds to each pipeline step. There are two special steps: + +* *Initializing Process* +* *Cloning Main Repository* + +These are Codefresh built-in steps and will appear for most builds (you can also create a pipeline that doesn't clone a git repository by default). The rest of the step names depend on your `codefresh.yml` (or the default step names provided by Codefresh). The different columns take the names from the defined [pipeline stages]({{site.baseurl}}/docs/pipelines/stages/). + +### Viewing status for pipeline steps + +Monitor the status of the steps in the pipeline as they are executed. + +{: .table .table-bordered .table-hover} +| Step Status Icon | Description | +| ------------------------| ---------------- | +|{::nomarkdown} {:/}| Pipeline step completed successfully. | +|{::nomarkdown} {:/}| Pipeline step pending approval has been approved, either manually or automatically. | +|{::nomarkdown} {:/}| Pipeline step pending approval has been denied approval. | +|{::nomarkdown} {:/}| Pipeline step currently running. | +|{::nomarkdown} {:/}| Pipeline step running in debug mode. See [Debugging pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/debugging-pipelines/) for more information. | +|{::nomarkdown} {:/}| Pipeline step gracefully terminating execution. | +|{::nomarkdown} {:/}| Pipeline step execution has been manually or automatically terminated. | +|{::nomarkdown} {:/}| Pipeline step execution has been terminated because of error. | + + + +### Viewing/downloading logs for builds and build steps + +View logs for running and completed builds and download them in HTML or text formats. +You can view logs online, for the entire build or for single or specific steps in the build. Similarly, you can download the logs for the entire build, or for single or specific steps. +The Filter Logs option is useful to view and manage logs, especially for large builds as there is a max size limit for logs. You can also search logs. + +>Note: + The max log size for the entire build is 100MB, and 20MB per step. The system stops generating logs once the build size is exceeded. + For large builds, it is easier to filter the logs by single or multiple steps, and then view/download them. + +1. In the **Builds** page, select a build. +1. To view logs online for the selected build, click **Output** in the lower part of the Build page. +1. Optional. Select **Filter Logs** and then select the step or steps for which view/download logs. + Logs are displayed for the selected steps. +1. From either the context menu on the top-right of the toolbar or from the Output pane, select **Download as HTML** or **Download as text**. + The log file is downloaded with the build ID as the filename, including also the step name if the log is for a single step, in the format `'. 
+ + {% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-logs.png" +url="/images/pipeline/monitoring/build-logs.png" +alt="Build log in Codefresh" +caption="Build log in Codefresh" +max-width="60%" +%} + + +### Viewing variables in pipeline builds + +Variables, both system (environment) and custom (user-defined), are injected into pipelines from different sources and at different levels. +The variables actually used by a specific build of the pipeline varies according to the events that triggered the pipeline. +Select a build to view all its variables, and identify their source, and overrides if any. + +1. In the **Builds** page, either select the build and then open the context-menu, or open the context-menu on the right of the build entry. +1. Select **Variables**. + + {% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-variables-view-option.png" +url="/images/pipeline/monitoring/build-variables-view-option.png" +alt="Variables option in context menu of build entry" +caption="Variables option in context menu of build entry" +max-width="70%" +%} + +{:start="3"} +1. If required, click the Sort icon for the group to sort in alphabetical order. +1. To copy the group's variables to the clipboard, click the Copy icon. + + +Here's an example of the list of variables for a pipeline build. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-variables-list.png" +url="/images/pipeline/monitoring/build-variables-list.png" +alt="List of variables in selected build" +caption="List of variables in selected build" +max-width="50%" +%} + +The variables are grouped by granularity, starting with the global project-level variables and ending with the trigger-level variables with the highest granularity: +* Project +* Shared configuration +* Pipeline +* Trigger + +A variable with a strikethrough indicates an override by the same variable in a lower-level group. For rules on precedence and overrides for variables in builds, see [Variables]({{site.baseurl}}/docs/pipelines/variables/). + +>Notes: + * Variables exported across steps with `cf_export` are not identified as `cf-exported` variables in the list. + * Secret-type variables are always masked. + + + +### Reviewing the yaml for the pipeline + +From the step details you can also click on the yaml tab to see the yaml segment for that individual step: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/yaml-from-step.png" +url="/images/pipeline/monitoring/yaml-from-step.png" +alt="Step Yaml" +caption="Step Yaml" +max-width="60%" +%} + +If you want to see the yaml for the whole pipeline, +- Click the *YAML* tab on the bottom left corner without selecting a step first or +- Select the three dots next to the "RESTART" button on the top-right, and click on *Show YAML* + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/view-pipeline-yaml.png" +url="/images/pipeline/monitoring/view-pipeline-yaml.png" +alt="Pipeline Yaml" +caption="Pipeline Yaml" +max-width="60%" +%} + +In both cases you can copy to clipboard the yaml shown using the button at the top left corner. + +### Viewing pipeline metrics + +Codefresh offers several metrics for the pipeline, and for steps in the pipeline, that allow you to get a better overview of the resources +consumed by your pipeline. 
+ +**Pipeline metrics** + +At the most basic level, Codefresh displays quick metrics while the pipeline is running that include +memory consumed and size of logs: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/quick-pipeline-metrics.png" +url="/images/pipeline/monitoring/quick-pipeline-metrics.png" +alt="Metrics for running pipeline" +caption="Metrics for running pipeline" +max-width="70%" +%} + +* To view memory and disk usage for running or completed pipeline builds, click the **Metrics** tab at the bottom of the Build page. + + * Memory usage: View memory usage (Y-axis) by time (X-axis) for the duration of the build. + * Disk usage: View disk usage (Y-axis) by time (X-axis) for the duration of the build. The red line is set at 90% of the maximum disk space. + To see the precise usage at different points in time, mouse over the dots. + Viewing the actual disk usage for a build during its run allows you to better gauge and define the [minimum disk space required for the build volume]({{site.baseurl}}/docs/pipelines/pipelines/#runtime). + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/pipeline-metrics.png" +url="/images/pipeline/monitoring/pipeline-metrics.png" +alt="Detailed metrics for pipelines" +caption="Detailed metrics for pipelines" +max-width="70%" +%} + + +**Pipeline-step metrics** +For step-specific metrics, first select the step, and then click the **Metrics** tab. +Step metrics are available for memory and CPU usage (not disk space). + + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/step-metrics.png" +url="/images/pipeline/monitoring/step-metrics.png" +alt="Step metrics" +caption="Step metrics" +max-width="70%" +%} + + +### Restarting the pipeline + +You can choose to restart any pipeline by clicking the button at the top right corner. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/restart-pipeline.png" +url="/images/pipeline/monitoring/restart-pipeline.png" +alt="Restart a pipeline" +caption="Restart a pipeline" +max-width="70%" +%} + +>It is important to note that "Restart from beginning" will restart a pipeline with the **same** state that it had in its original execution (including the original git commit). If you want to execute a pipeline again with a new state instead, you need to use the *Run* button in the [pipeline editor]({{site.baseurl}}/docs/pipelines/pipelines/#using-the-inline-pipeline-editor) and selecting any of the available [triggers]({{site.baseurl}}/docs/pipelines/triggers/). + + + +If the pipeline has failed, you can choose to restart it only from the failed step and onwards. + +You can also restart from a failed step right from the graphical view: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/restart-failed.png" +url="/images/pipeline/monitoring/restart-failed.png" +alt="Restart from a failed step" +caption="Restart from a failed step" +max-width="70%" +%} + +>Notice again that restarting a pipeline from a failed step means restarting the pipeline with the **same** state that it had at the point in time (including the original git commit). + +If your pipeline has some flaky steps, you can also use the [retry syntax]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#retrying-a-step) in your yaml instead of restarting them manually each time they fail. 
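Here is a minimal sketch of the retry syntax on a flaky step. The step itself and the values shown are illustrative; the field names follow the retry documentation linked above.

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
steps:
  my_flaky_tests:
    title: Running integration tests
    image: node:11
    commands:
      - npm install
      - npm run integration-test
    retry:
      maxAttempts: 3         # run the step up to 3 times before failing the build
      delay: 5               # wait 5 seconds between attempts
      exponentialFactor: 2   # double the delay after each failed attempt
{% endraw %}
{% endhighlight %}

With a definition like this, a transient failure no longer forces you to restart the build from the failed step by hand.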
+ + +## Monitoring pipelines outside the Codefresh UI + +You don't always have to be in the Codefresh UI in order to monitor the status of your builds. + + +### Monitoring pipelines that check Pull Requests + +One of the most +important roles of a CI platform is to automatically update the status of a GIT Pull request with the result +of the respective build. + +{% include +image.html +lightbox="true" +file="/images/getting-started/quick-start-test-pr/auto-build-pr.png" +url="/images/getting-started/quick-start-test-pr/auto-build-pr.png" +alt="Pull Request Status" +caption="Pull Request Status (click image to enlarge)" +max-width="50%" +%} + +If you have setup a [GIT trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) in Codefresh then by default this happens automatically without any other configuration +for all automated commits (that are coming from webhooks). + +If you start a build manually then by default the git status will **not** be updated (i.e. the result of the pipeline +will not affect the status of Pull request) + +If you don't want this behavior to happen, you can enable the git status update checkbox when you launch a pipeline. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/report-notification-checkbox.png" +url="/images/pipeline/monitoring/report-notification-checkbox.png" +alt="Update git status for pipelines triggered manually " +caption="Update git status for pipelines triggered manually (click image to enlarge)" +max-width="50%" +%} + +This way the pipeline status *will* change the build status even with manual builds. + +The same behavior is also available to the [Codefresh CLI](https://codefresh-io.github.io/cli/pipelines/run-pipeline/). In that case use the parameter `--enable-notifications` +to specify if manually triggering a build will also change the GIT status. + +For open source projects you also have the ability to [trigger builds from external forks]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#support-for-building-pull-requests-from-forks). + +### Viewing pipeline status from text/html files + +Codefresh also supports build badges that allow you to show the +status of a Pipeline in Text files or web pages. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-badge.png" +url="/images/pipeline/monitoring/build-badge.png" +alt="Codefresh build badges" +caption="Codefresh build badges" +max-width="100%" +%} + +See the [build badges page]({{site.baseurl}}/docs/pipelines/build-status/) for more information. + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Test report]({{site.baseurl}}/docs/pipelines/test-reports/) +[Status badges]({{site.baseurl}}/docs/pipelines/build-status/) diff --git a/_docs/pipelines/pipeline-caching.md b/_docs/pipelines/pipeline-caching.md new file mode 100644 index 000000000..ef812db8b --- /dev/null +++ b/_docs/pipelines/pipeline-caching.md @@ -0,0 +1,314 @@ +--- +title: "Caching in pipelines" +description: "Faster builds with Codefresh caching" +group: pipelines +toc: true + +--- + +One of the unique features of Codefresh is the multitude of caching systems that take part in a pipeline, and in particular the caching mechanisms targeted specifically at Docker builds. Most types of caching are completely automatic and require zero configuration in order to activate. 
Caching is a built-in feature in all Codefresh accounts regardless of pricing tier (even free accounts have all types of caching enabled).

## Types of caching

Here is a quick overview of all types of caching used in a Codefresh pipeline:

{: .table .table-bordered .table-hover}
| Caching mechanism | Activation | Used in | Comments |
| -------------- | ---------------------------- |-------------------------| -------------------------|
| Distributed Docker step/image caching | Automatic | All pipeline [steps]({{site.baseurl}}/docs/pipelines/steps/) | |
| Distributed Docker layer caching | Automatic | Pipeline [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) | Mimics local Docker layer cache|
| Caching from previous built image | Automatic | Pipeline build steps | Distributed version of `--cache-from`|
| Docker registry caching | Automatic | Pipeline build steps | Works for all [connected Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/)|
| Traditional build caching | Automatic/manual | Pipeline [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) | See notes for [parallel builds]({{site.baseurl}}/docs/pipelines/advanced-workflows/)|

All these caching mechanisms are enabled by default and you can [freely disable them]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) if you encounter any issues with caching.

Let's see these caches in order and how to use them effectively.

## Distributed Docker image caching

This is the simplest mode of caching available. All Codefresh steps [are in fact docker images]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/). Once a pipeline runs for the first time, Codefresh pulls all required images from their registries (either public or private) and caches them for the next build:

{% include image.html
lightbox="true"
file="/images/pipeline/caching/image-caching.png"
url="/images/pipeline/caching/image-caching.png"
alt="Caching pipeline steps"
caption="Caching pipeline steps"
max-width="60%"
%}

The next time the pipeline runs, all images are fetched from the cache. This includes built-in steps (e.g. the [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/)), custom steps from [the marketplace](https://codefresh.io/steps/), or your own [dynamic pipeline steps]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#creating-docker-images-dynamically-as-build-tools).

This cache mechanism is completely automatic and is not user configurable. Some ways that you can affect it are:

* If you use well-known images in your pipeline (such as `alpine`, `node`, `maven`, etc.), they are more likely to be already cached by the Codefresh platform
* Use specific tags for your images (e.g. `alpine:3.9.2` and `maven:3-jdk-11-openj9`) instead of generic ones (e.g. `alpine:latest` and `node:buster`) that change all the time
* Using small images in the pipeline will make caching/restoring of pipeline steps much faster.

You can see in the [pipeline build logs]({{site.baseurl}}/docs/pipelines/steps/build/) if the images of your steps are found in cache or not.
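As a hedged illustration of the tag-pinning advice above (the step names are arbitrary, and the tags are only examples), compare a freestyle step that uses a moving tag with one pinned to a specific version:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
steps:
  unpinned_step:
    title: Moving tag (contents can change between builds)
    image: alpine:latest
    commands:
      - cat /etc/alpine-release
  pinned_step:
    title: Pinned tag (identical image every build)
    image: alpine:3.9.2
    commands:
      - cat /etc/alpine-release
{% endraw %}
{% endhighlight %}

Both forms work, but the pinned variant gives the cache a stable, predictable image to reuse across builds.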
Here is an example of a cache hit: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/image-cache-hit.png" +url="/images/pipeline/caching/image-cache-hit.png" +alt="Docker image cache hit" +caption="Docker image cache hit" +max-width="50%" +%} + +and a cache miss: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/image-cache-miss.png" +url="/images/pipeline/caching/image-cache-miss.png" +alt="Docker image cache miss" +caption="Docker image cache miss" +max-width="50%" +%} + +This cache mechanism is applicable to all Codefresh pipelines and steps. + + +## Distributed Docker layer caching + +This type of caching is **only** applicable to [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) and mimics the ways docker layer caching behaves locally on your workstation. + +When you build images locally, Docker will cache intermediate layers making future builds much faster. You can see when caches are used in your build logs. + +{% highlight shell %} +{% raw %} +> docker build . -t my-app +Sending build context to Docker daemon 81.92kB +Step 1/10 : FROM golang:1.12-alpine + ---> 6a17089e5a3a +Step 2/10 : RUN apk add --no-cache git + ---> Using cache + ---> 7b65bc6a6690 +Step 3/10 : WORKDIR /app/go-sample-app + ---> Using cache + ---> 8755d1490fe2 +Step 4/10 : COPY go.mod . + ---> Using cache + ---> 476d868ceddd +Step 5/10 : COPY go.sum . + ---> Using cache + ---> 3239097e9bde +[...] +{% endraw %} +{% endhighlight %} + +In a distributed build environment however, things work much differently as each build node has its own cache. If you run a pipeline on one node and then run a second build on another node everything will be recreated again because (normally) build nodes don't share any cache. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/no-distributed-layer-cache.png" +url="/images/pipeline/caching/no-distributed-layer-cache.png" +alt="Without a distributed docker layer cache" +caption="Without a distributed docker layer cache" +max-width="60%" +%} + +In the example above if you run another build that is picked up by build node 18 all Docker filesystem layers will be recreated again even though they are already present in other nodes. + +Codefresh is one of the few CI/CD solutions that has a *distributed* Docker layer cache. This makes layer caching available to all build nodes. It doesn't matter any more which build node runs which pipeline as all of them are equal regarding their caching capabilities. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/distributed-layer-cache.png" +url="/images/pipeline/caching/distributed-layer-cache.png" +alt="Wit a distributed docker layer cache" +caption="With a distributed docker layer cache" +max-width="60%" +%} + +With the distributed docker layer cache all build nodes are now equal. Any of the available nodes can pick your next pipeline build as all of them have access to all the previous docker filesystem layers. 
+ +You can see if this cache is used in your [pipeline logs]({{site.baseurl}}/docs/pipelines/steps/build/): + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/distributed-docker-layer-cache.png" +url="/images/pipeline/caching/distributed-docker-layer-cache.png" +alt="Docker layer caching regardless of build node" +caption="Docker layer caching regardless of build node" +max-width="60%" +%} + +Codefresh will also automatically pass the `--cache-from` directive to docker builds with the previous successful build artifacts: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/cache-from.png" +url="/images/pipeline/caching/cache-from.png" +alt="Distributed version of `--cache-from`" +caption="Distributed version of `--cache-from`" +max-width="60%" +%} + +To take advantage of this build cache just follow the official Docker guidelines and best practices such as + +* Download dependencies in a separate docker layer +* Put layers that will not change frequently at the top of dockerfile (e.g. OS libs) +* Put things that will change frequently at the bottom of the dockerfile (e.g. source code) +* Don't use side effects in Dockerfiles + +Basically, if your Dockerfile is already optimized on your local workstation, it should also be optimized for Codefresh. More information can be found in the official documentation: + +* [https://www.docker.com/blog/intro-guide-to-dockerfile-best-practices/](https://www.docker.com/blog/intro-guide-to-dockerfile-best-practices/) +* [https://docs.docker.com/develop/develop-images/dockerfile_best-practices/](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) + +## Docker registry caching + +This is a caching mechanism unique to Codefresh and applicable only to [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) when any of [connected Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) is used. + +Codefresh will check the internal Docker registry *before* a build step and if the exact same image is found (using the image hash), it will skip the build step completely: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/skip-build.png" +url="/images/pipeline/caching/skip-build.png" +alt="Skipping a previously built Docker image" +caption="Skipping a previously built Docker image" +max-width="60%" +%} + +This is a very effective way to cut down the amount of time needed by pipelines but it obviously works only for Docker images that don't change often (helper images, plugins, build tools etc.) as the deployment docker images will always be different when a new git commit happens in the source code. + +You can take advantage of this mechanism by [not mixing deployment docker images with development docker images](https://codefresh.io/containers/docker-anti-patterns/). The former will change all the time, while the latter should be recreated less often. + +## Traditional build caching + +If you have read the [introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) page you will already be familiar with the shared volume that is automatically mounted on all pipeline steps. This volume is not only used for data exchange between steps of the same pipeline, but is also stored/fetched for each subsequent build as well. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/caching/pipeline-volume-caching.png" +url="/images/pipeline/caching/pipeline-volume-caching.png" +alt="Pipeline workspace caching" +caption="Pipeline workspace caching" +max-width="90%" +%} + +This means that unlike other CI solutions where you have to manually describe what folder you wish to cache, in Codefresh **everything that exists in `/codefresh/volume` and its subfolders is automatically cached between different builds** of the same pipeline. The volume mounting and caching/restoring process is completely automatic. You don't need any configuration about it. + +The main choice that you have is which files to place on the volume. For example, Node.js uses the folder `node_modules` for its dependencies which are placed under the project folder [which is automatically placed under the volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code). So all contents of `node_modules` will be cached by default without any further action on your part. + +>Note that if you are using [Codefresh on-prem]({{site.baseurl}}/docs/installation/codefresh-on-prem/), this kind of caching is not available for the built-in runtime and you need to use the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) +with your own runtime to activate volume caching. + +The simplest way to see this caching mechanism in action is this pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + write_sample_file: + title: Writing to shared volume + image: alpine:3.10.3 + commands: + - date >> /codefresh/volume/sample.txt + read_sample_file: + title: Reading from shared volume + image: alpine:3.10.3 + commands: + - cat /codefresh/volume/sample.txt +{% endraw %} +{% endhighlight %} + +If you run this pipeline multiple times you will see multiple entries in the file `sample.txt`. + +>Note that if you run concurrent builds too quickly after one another, the Codefresh Volume will refresh [from scratch]({{site.baseurl}}/docs/pipelines/pipeline-caching/#issues-with-parallel-builds-and-parallel-pipelines) instead of being cached between builds. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/codefresh-shared-volume.png" +url="/images/pipeline/caching/codefresh-shared-volume.png" +alt="Shared volume after 3 builds of the same pipeline" +caption="Shared volume after 3 builds of the same pipeline" +max-width="60%" +%} + +Notice also the complete lack of `volume` directives in the `codefresh.yml` file. The pipeline volume is mounted and cached/restored by Codefresh with no configuration on your part. + +Some important points on this caching mechanism: + +* The volume is handled and managed by Codefresh in a completely transparent manner. You **DO NOT** need any `volume` directives in your pipelines to take advantage of it. The volume is even present in [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) for integration tests. +* On each build the [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) will purge/delete everything that is not placed in `.gitignore`. So make sure that your `.gitignore` files contain all the things that you want to see cached (e.g. `node_modules`) +* If you use the SAAS version of Codefresh, volumes will be reused across all your account pipelines. 
If you use the On-prem or Hybrid version of Codefresh, pipeline volumes can be scoped to different pipelines or triggers as well +* You need at least one build of your pipeline in order for the cache mechanism to take any effect. +* The volume is **NOT available** in [build steps]({{site.baseurl}}/docs/pipelines/steps/build/). This is not a Codefresh limitation. Docker itself [does not allow volumes during builds](https://github.com/moby/moby/issues/14080). There is no folder `/codefresh/volume` inside a Dockerfile for you to access. +* This is the only caching mechanism that is not related to Docker images. So if you compile/package a traditional application with Codefresh that is not packaged as a Docker image this is the only way to get faster builds. + +See also a [full example]({{site.baseurl}}/docs/yaml-examples/examples/shared-volumes-between-builds/) that uses the volume at [https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds](https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds). + +### Caching folders which are outside your project folder + +By default if you checkout a Git project named `foo`, the source code [is placed under]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) `/codefresh/volume/foo`. This means that with zero configuration the following things are cached: + +* your source code of `foo` project +* all dependencies under the project folder (e.g. `foo/node_modules`) +* all project logs, test results that are inside the project module. + +Everything else found in external folders is NOT cached by default. So if you have things in folders such as `/root`, `/tmp/`, `/home/`, `/var/` that you need to cache you need to manually copy them to the volume. + +In practice, this means that you need to look at the documentation of your build system and test framework and make sure that all folders you want cached are placed under the Codefresh volume. This is a typical pattern with Java applications. + + * For Maven use `mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package` as shown in the [example]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/). + * For Gradle use `gradle -g /codefresh/volume/.gradle -Dmaven.repo.local=/codefresh/volume/m2` as explained in the [example]({{site.baseurl}}/docs/learn-by-example/java/gradle/). + * For SBT use `-Dsbt.ivy.home=/codefresh/volume/ivy_cache`. + * For Pip use `pip install -r requirements.txt --cache-dir=/codefresh/volume/pip-cache` as shown in the [example]({{site.baseurl}}/docs/learn-by-example/python/django/) + * For Golang pass an environment variable `GOPATH=/codefresh/volume/go` to the freestyle step that is running go commands + * For Rust pass an environment variable `CARGO_HOME=/codefresh/volume/cargo` to the freestyle step that is running rust/cargo commands + + This is only needed for traditional applications that are not dockerized. If you already use Docker containers the previous caching mechanisms are already enough. + +### Issues with parallel builds and parallel pipelines + +Codefresh supports two forms of parallelism, parallel steps within the same pipeline and parallel pipelines (as well as concurrent builds). + +All parallel steps inside the same pipeline use the same volume. Codefresh [does not perform any conflict detection in that case]({{site.baseurl}}/docs/pipelines/advanced-workflows/#shared-codefresh-volume-and-race-conditions). 
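As a minimal sketch (the step names and file paths are arbitrary), two parallel steps can stay out of each other's way by writing to distinct files on the shared volume:

 `codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
steps:
  parallel_writes:
    type: parallel
    steps:
      write_results_a:
        image: alpine:3.10.3
        commands:
          - echo "result A" > /codefresh/volume/results-a.txt   # each parallel step writes its own file
      write_results_b:
        image: alpine:3.10.3
        commands:
          - echo "result B" > /codefresh/volume/results-b.txt
  read_results:
    title: Collecting results
    image: alpine:3.10.3
    commands:
      - cat /codefresh/volume/results-a.txt /codefresh/volume/results-b.txt
{% endraw %}
{% endhighlight %}

Because the two writers target different paths, there is nothing for them to race on, even though they share the same volume.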
For concurrent builds of the same pipeline, notice that if you make too many commits very fast (triggering a second build while the previous one is still running), Codefresh will allocate a brand new volume for the subsequent builds. This forces those builds to start with a clean shared volume, resulting in longer build times. Be sure to set your [build termination settings]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) correctly.

{% include image.html
lightbox="true"
file="/images/pipeline/caching/concurrent-build-caching.png"
url="/images/pipeline/caching/concurrent-build-caching.png"
alt="Concurrent build caching"
caption="Concurrent build caching"
max-width="80%"
%}

The diagram above shows the following sequence of events:

1. The first build of a pipeline is triggered. Codefresh allocates a brand new volume and automatically mounts it as a workspace at `/codefresh/volume`.
1. The first build runs and stores artifacts on the volume.
1. The first build finishes. Codefresh stores the volume in the cache.
1. A second build is triggered for the same pipeline and the same git branch. Codefresh sees that there is already a volume in the cache and passes it to the second build. The second build correctly finds all artifacts in the cache.
1. *Before the second build finishes*, a third build is triggered.
1. The pipeline volume is still locked by the second build and Codefresh cannot use it in the third build. Codefresh allocates a **brand new volume** that has no artifacts at all and passes it to the third build.
1. The second build finishes and its volume is saved into the cache.
1. The third build finishes and its volume is saved into the cache, *overwriting* the volume of the second build.
1. If a fourth build starts, it will use the volume from the third build, since this was the last volume saved.



## Codefresh cache size and eviction policy

If you use the SAAS version of Codefresh, you don't have any control over the cache policies.
The SAAS version is fully controlled by Codefresh personnel, and the cache policies in place might clear caches sooner than you expect.

If you run a pipeline very infrequently, it is possible to suffer many cache misses. If you also use obscure Docker images, you might see them downloaded again and again.

If you run the [hybrid or on-prem versions]({{site.baseurl}}/docs/enterprise/installation-security/) of Codefresh, then your system administrator is responsible for fine-tuning the cache settings.
+ +## Related articles +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Parallel pipelines]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/pipelines/pipelines.md b/_docs/pipelines/pipelines.md new file mode 100644 index 000000000..9c0b20613 --- /dev/null +++ b/_docs/pipelines/pipelines.md @@ -0,0 +1,334 @@ +--- +title: "Creating pipelines" +description: "Define pipelines in Codefresh" +group: pipelines +redirect_from: + - /docs/pipeline + - /docs/pipeline/ + - /docs/pipelines + - /docs/pipelines/ + - /docs/pipelines/introduction/ + - /docs/pipelines/introduction + - /docs/inline-yaml-editing + - /docs/inline-yaml-editing/ +toc: true +--- + +Before creating a pipeline, make sure you are familiar with the theory behind [Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/). + +## Pipeline concepts + +The aim of Codefresh pipelines is to have re-usable sequences of steps that can be used for different applications (or micro-services) via the use of Git triggers. + +The main concepts are shown below: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/concepts.png" +url="/images/pipeline/create/concepts.png" +alt="Pipeline concepts" +caption="Pipeline concepts" +max-width="60%" +%} + +* **Projects**: The top-level concept in Codefresh CI/CD. Projects are used to group related CI pipelines. In most cases, a single project will be a single application that itself contains many microservices. You are free to use projects as you see fit. For example, you could create a project for a specific Kubernetes cluster or for a specific team/department. + +* **Pipelines**: Each project can have multiple pipelines. Pipelines that belong to a single project can be managed as a unit. You can also create a new pipeline by copying an existing pipeline. Notice that unlike other CI solutions, a pipeline in Codefresh is **NOT** tied to a specific Git repository. You should try to make your pipelines generic enough so that they can be reused for similar applications even when they exist in different Git repositories (a fairly typical setup for microservices). + +* **Pipeline steps**: Each pipeline has a definition that defines the [pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) that are executed each time the pipeline is triggered. The definition of a pipeline is described in a special [codefresh.yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) file. The `codefresh.yml` file can be fetched from the same repository as that of the source code, from a completely different repository, or even defined in-place in the Codefresh pipeline editor. Again, notice you can have a pipeline that checks out its source code from Git repository A, but actually defines its steps in a `codefresh.yml` file that is fetched from Git repository B. + +* **Triggers**: A pipeline can have zero, one, or many [triggers]({{site.baseurl}}/docs/pipelines/triggers/). Triggers are the linking medium between a pipeline and a Git repository. Codefresh supports several kinds of triggers such as Git, Cron, and Docker push triggers. +Triggers that happen with Git webhooks can come from the same Git repository that contains the git code, **OR**, a completely different repository. 
You can have a pipeline with multiple triggers to be executed when a code change happens to any of them. + +With these basic building blocks, you can define many complex workflows. In particular, it is very easy in Codefresh to create a scenario where: + +1. A pipeline is launched because a trigger exists for Git repository A +1. The pipeline reads its `codefresh.yml` file from Git repository B +1. The pipeline clones source code from Git repository C (and starts packaging/compiling it) + +Of course, you can also have a simpler scenario where the trigger, the pipeline steps and the source code of the application are all defined for the same Git repository. + + +## Creating a pipeline + +You can create new projects by clicking on *Projects* in the left sidebar and then selecting the *New Project* button on the top right corner. A dialog will appear that will ask you for the project name and optional tags that you can use for [access control]({{site.baseurl}}/docs/enterprise/access-control/). + +Once you are inside the project view you can start editing pipelines with a UI environment that works similar to a traditional IDE. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/pipeline-manager.png" +url="/images/pipeline/create/pipeline-manager.png" +alt="Pipeline manager" +caption="Pipeline manager" +max-width="70%" +%} + +1. On the top left you can see your current project. You can also change it by clicking on the drop-down on the top left corner. + +1. On the left side of the screen you can see all pipelines that currently belong to this project. Click on each one to edit it. +On the bottom part of this panel the *New pipeline* button allows you to create a new pipeline on the same project either from scratch +or by copying an existing one from the same project or a completely different project. + +1. The name of the currently edited pipeline is shown at the top of the window. + +1. The main window shows the definition of the current pipeline. The screenshot shows the inline editor but pipelines can also be defined from external files (checked into source control) as explained later. + +1. The right part of the window shows extra settings for this pipeline such as [predefined steps]({{site.baseurl}}/docs/codefresh-yaml/steps/), [triggers]({{site.baseurl}}/docs/pipelines/triggers/) and launch variables/parameters. + + + + +### Using the Inline pipeline editor + +When first creating a pipeline you will see an inline editor that allows you to define the [pipeline yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) right there in the Codefresh UI. This is great when you are starting a new project because it offers you really quick feedback. You can edit the yml steps, run a build, edit again, run a build and so on. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/inline-editor.png" +url="/images/pipeline/create/inline-editor.png" +alt="Inline Pipeline editor" +caption="Inline Pipeline editor" +max-width="60%" +%} + +On the top right of the panel you have additional controls: + +* The *import* button allows you to bring a `codefresh.yml` from your local workstation into the editor +* The *comment* button allows you to quickly comment/uncomment the currently selected text. The hotkey `Ctrl-/` also performs the same action +* The *formatting* button enriches the editor with special symbols for line breaks, spaces and tabs. 
This allows you to easily fix common formatting errors.
* The *copy* button quickly copies the **whole** pipeline text to your clipboard
* You can use `Ctrl-]` and `Ctrl-[` to change the indentation of the current line (use the Command key instead on macOS)


Notice that in the editor you can expand/collapse individual yaml blocks using the arrow triangles on the left of each block. The initial pipeline presented in the editor is suggested by Codefresh according to the contents of your Git repository.

> You can also see the suggested Codefresh pipeline for any public git repository by using the [analyze option](https://codefresh-io.github.io/cli/analyzer/){:target="\_blank"} of the Codefresh CLI.


## Loading codefresh.yml from Version Control

Working with the inline editor is very convenient in the beginning, but it means your pipeline definition exists only within the Codefresh UI and therefore goes against the basic principles of [infrastructure as code](https://en.wikipedia.org/wiki/Infrastructure_as_Code){:target="\_blank"}. Once you are happy with how your pipeline works, you should commit it to a Git repository (which can be the same one that has the source code of the application or a completely different one).

You can click on the *Inline YAML* header and switch it to *Use YAML from URL* or *Use YAML from Repository*.

{% include
image.html
lightbox="true"
file="/images/pipeline/create/pipeline-from-internal-repo.png"
url="/images/pipeline/create/pipeline-from-internal-repo.png"
alt="Pipeline from internal repo"
caption="Pipeline from internal repo"
max-width="60%"
%}

You can then copy and paste a URL to a raw Codefresh YAML file. This allows you to load a Codefresh YAML from any public URL. Notice that a raw URL is needed in the case of GitHub.

As an example, instead of using `https://github.com/codefresh-contrib/example-voting-app/blob/master/codefresh.yml` you should enter `https://raw.githubusercontent.com/codefresh-contrib/example-voting-app/master/codefresh.yml`.

## Pipeline settings

Once you create your pipeline, you can also click the top tab called *Settings* for some extra parameters.

### General

- **Pipeline Name**: The name of your pipeline (useful for working with the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"})
- **Pipeline ID**: The ID of your pipeline (useful for working with the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"})
  > When working with the Codefresh CLI, the Pipeline Name and ID are interchangeable.
- **Pipeline Description**: Free-text description of the pipeline.
- **Pipeline Tags**: One or more tags used for [access control]({{site.baseurl}}/docs/administration/access-control/)
- **Public Build Logs**: If enabled, [users without a Codefresh account]({{site.baseurl}}/docs/pipelines/build-status/#public-build-logs) can view the builds of this pipeline.
- **Template**: Convert this pipeline to a template (see the next section for details on templates).
- **Badges**: Simple images that show you the last [build status]({{site.baseurl}}/docs/pipelines/build-status/).

### Policies

- **Pipeline Concurrency**: The maximum number of concurrent builds (0-14 or unlimited). Set the concurrency when your pipeline has only one trigger.
  > A Pipeline Concurrency of **0** freezes execution of the pipeline, switching it to maintenance mode. Use this concurrency setting to modify existing pipelines and freeze execution until you complete the changes.
- **Trigger Concurrency**: The maximum number of concurrent builds per trigger (1-15 or unlimited). Define the trigger concurrency when your pipeline has multiple triggers.
- **Branch Concurrency**: The maximum number of concurrent builds per branch (1-15 or unlimited). Define this when your pipeline can build different branches.
- **Build Termination**: Options that determine when a build from the pipeline should terminate:
  - Once a build is created, terminate previous builds from the same branch
  - Once a build is created, terminate previous builds only from a specific branch (name matches a regular expression)
  - Once a build is created, terminate all other running builds
  - Once a build is terminated, terminate all child builds initiated from it
- **Pending approval volume**: Choose what happens with the [pipeline volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) when a pipeline is waiting for [approval]({{site.baseurl}}/docs/pipelines/steps/approval/#keeping-the-shared-volume-after-an-approval):
  - Keep the volume available
  - Discard the volume
  - Honor the option defined globally in your Codefresh account
- **Pending approval concurrency limit effect**: Determines if a build that is pending approval [counts against]({{site.baseurl}}/docs/pipelines/steps/approval/#define-concurrency-limits) the concurrency limits or not:
  - Builds in pending approval will **not** be counted when determining the concurrency limit for a pipeline
  - Builds in pending approval will **be** counted when determining the concurrency limit for a pipeline
  - Honor the option defined globally in your Codefresh account

The **Pipeline and Trigger Concurrency** limits are very important, as they allow you to define how many instances of a pipeline can run in parallel when multiple commits or multiple pull requests take place.

> Notice that these limits are *unrelated* to [parallelism within a single pipeline]({{site.baseurl}}/docs/pipelines/advanced-workflows/).

Some common scenarios are:

* a pipeline that uses a shared resource such as a database or queue, and you want to limit how many pipelines can access it
* a pipeline that deploys to a single production environment (in most cases you only want one active pipeline touching production)

The **Build Termination** settings are useful for pipelines where you commit too fast (i.e. faster than the actual runtime of the pipeline).
All these settings allow you to reduce the number of running builds when too many triggers are launched at the same time.
You will find them very useful in cases where many developers are performing small commits and builds take a long time to finish (e.g. a build takes 10 minutes to finish and developers push multiple times every 2 minutes).

Some common scenarios are:

* You are interested only in the latest commit of a branch. If builds from earlier commits are still running, you want to terminate them.
* You don't want to wait for child pipelines to finish (i.e. when a pipeline calls another pipeline), or when a new build starts for a parent pipeline.

For the volume behavior during approvals, notice that if [you keep the volume available]({{site.baseurl}}/docs/pipelines/steps/approval/#keeping-the-shared-volume-after-an-approval) on the pipeline while it is waiting for approval, it will still count as "running" against your pricing tier limit.
+ +### External resources + +In a big organization you might have some reusable scripts or other resources (such as Dockerfiles) that you want to use in multiple pipelines. Instead of fetching them manually in freestyle steps you can simply define them as *external resources*. When a pipeline runs, Codefresh will fetch them automatically and once the pipeline starts the files/folders will already be available in the paths that you define. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/external-resources.png" +url="/images/pipeline/create/external-resources.png" +alt="Bringing external resources into a pipeline" +caption="Bringing external resources into a pipeline" +max-width="80%" +%} + +Currently Codefresh supports the automatic fetching of files or folders from another Git repository. To create an external resource click the *Add Resource* button and choose: + +* The Git repository that contains the files/folder you wish to bring in the pipeline workspace +* The branch from the Git repository that contains the files/folders you wish to bring in the pipeline workspace +* The source folder in the GIT repo (use relative path) +* The target folder in the pipeline workspace where the file folder will be copied to (use absolute path) + +Once the pipeline starts, all files will be available to all freestyle steps in the paths mentioned in the target folder field. +You can define multiple external resources in a single pipeline. + +### Runtime + +- **Runtime Environment**: (by default this is set to SaaS) +- **Runtime OS**: (by default this is set to Linux) +- **Resources Size**: + - Small (recommended for 1-2 concurrent steps) + - Medium (recommended 3-4 steps) + - Large (recommended 5-6 steps) + +#### Set minimum disk space for a pipeline build +To speed up builds and improve performance, Codefresh caches different types of data during pipeline execution for reuse across builds. Image-caching is one example of cached data, where Codefresh pulls the required images during the first build and caches them for reuse in future builds. For more info, see [Pipeline caching]({{site.baseurl}}docs/pipelines/pipeline-caching). +Because a portion of the disk space is already utilized by cache, a build can run out of disk space and fail with the 'no space left on device' error. + +To prevent out-of-space scenarios that lead to failed builds, you can set the minimum disk space you need for the pipeline's build volume. Defining the minimum disk space ensures that Codefresh assigns either a cached disk with sufficient disk space or a new empty disk at the start of the build. + +The disk space set for the pipeline is inherited by all the builds run for the pipeline. +You can also configure the disk space for a [specific trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#set-minimum-disk-space-for-build-volume-by-trigger) used by the pipeline or for a specific run, and override what's set for the pipeline. + +1. Select the pipeline for which to set the disk space. +1. Select **Settings**, and then **Runtime**. +1. Enable **Set minimum required disk space** and either retain the default displayed or change as needed. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/set-build-disk-space.png" +url="/images/pipeline/create/set-build-disk-space.png" +alt="Set disk space for pipeline builds" +caption="Set disk space for pipeline builds" +max-width="60%" +%} + +> Track the actual disk usage in Builds > Metrics. 
+ + +## Using Pipeline Templates + +Codefresh also supports the creation of pipeline "templates", which are blueprints for creating new pipelines. +To enable the creation of pipelines from templates first visit the global pipeline configuration at [https://g.codefresh.io/account-admin/account-conf/pipeline-settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings){:target="\_blank"} and toggle the *Enable Pipeline Templates* button. + +The easiest way to create a new template is by clicking the "3 dots menu" on the pipeline name: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/create-template-menu.png" +url="/images/pipeline/create/create-template-menu.png" +alt="Create template from pipeline" +caption="Create template from pipeline" +max-width="30%" +%} + +From the dialog you can select if you want to copy this pipeline as a brand new template, or simply convert the pipeline itself to a template: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/template-dialog.png" +url="/images/pipeline/create/template-dialog.png" +alt="Template options" +caption="Template options" +max-width="80%" +%} + +Once the template is created, you can edit it like any other pipeline. Pipeline templates are marked with the `template` tag and also have a special mark in the pipeline menu: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/template-tag.png" +url="/images/pipeline/create/template-tag.png" +alt="Identify pipelines used as templates" +caption="Identify pipelines used as templates" +max-width="90%" +%} + +Now when you create a new pipeline, you can also select which pipeline template will be used as an initial pipeline definition: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/use-template.png" +url="/images/pipeline/create/use-template.png" +alt="Using a template" +caption="Using a template" +max-width="70%" +%} + +>Notice that templates only take effect during pipeline creation. Changing a template afterwards, has no effect on pipelines that are already created from it. + +You can also quickly convert a pipeline to a template, by visiting the pipeline settings and clicking the *template* button under the *General* tab. + + +## Pipelines that do not belong to any project + +Although we recommend adding all your pipelines to a project, this is not a hard requirement. You can create pipelines that do not belong to a project from the *Pipelines* section on the left sidebar. +If you have a Codefresh account created before May 2019 you might already have several pipelines that are like this. + +If you change your mind, you can also add detached pipelines (i.e. pipelines that are not part of a project) manually from the 3-dot menu that is found on the right of each pipeline. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/add-pipeline-to-project.png" +url="/images/pipeline/create/add-pipeline-to-project.png" +alt="Changing the project of a pipeline" +caption="Changing the project of a pipeline" +max-width="90%" +%} + +Pipelines that belong to a project will mention it below their name so it is very easy to understand which pipelines belong to a project and which do not. 
+ + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[External Docker Registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +[YAML Examples]({{site.baseurl}}/docs/yaml-examples/examples/) + + + + + diff --git a/_docs/pipelines/post-step-operations.md b/_docs/pipelines/post-step-operations.md new file mode 100644 index 000000000..ae98ebf51 --- /dev/null +++ b/_docs/pipelines/post-step-operations.md @@ -0,0 +1,116 @@ +--- +title: "Post-step operations" +description: "Annotate your builds and run extra steps" +group: pipelines +sub_group: steps +redirect_from: + - /docs/post-step-operations/ +toc: true +--- +Post-step operations are a set of optional predefined processes that can be configured on any step. These operations will be executed once the step has completed. The post-step operations allow you to annotate your builds, images and pipelines with extra metadata or run other steps. + + +## Result Aware Post-Step Operations +You may execute post-step operations conditionally, based on the outcome of the step itself. + +To execute operations only when the step has completed successfully, use `on_success`: + + +{% highlight yaml %} +step_name: + ... + on_success: + ... +{% endhighlight %} + +To execute operations only when the step has failed, use `on_fail`: + + +{% highlight yaml %} +step_name: + ... + on_fail: + ... +{% endhighlight %} + +## Result Agnostic Post-Step Operations +You may execute post-step operations regardless of the outcome of the step itself. + +To execute operations regardless of the result, use `on_finish`: + + +{% highlight yaml %} +step_name: + ... + on_finish: + ... +{% endhighlight %} + +## Available Post-Step Operations + +- [Image Metadata]({{site.baseurl}}/docs/docker-registries/metadata-annotations/) +- [Custom Annotations]({{site.baseurl}}/docs/codefresh-yaml/annotations/) +- [Hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/) + +## Example + +Marking a Docker image with the results of unit tests: + +{% highlight yaml %} +{% raw %} +build_step: + title: Building My Docker image + type: build + image_name: my-app-image + tag: 1.0.1 + dockerfile: Dockerfile +run_tests: + title: Running unit tests + image: ${{build_step}} + commands: + - npm install + - npm run test + on_success: # Execute only once the step succeeded + metadata: + set: + - ${{build_step.imageId}}: + - unit_tests: passed +{% endraw %} +{% endhighlight %} + +## Running other steps + +If you want to run another step in the pipeline when another step fails or succeeds you need to use [conditional execution of steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) and the `fail_fast` property. You can also use [step hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/) for dedicated post step actions. + +{% highlight yaml %} +{% raw %} +run_tests: + title: Running unit tests + image: node:11 + fail_fast: false + commands: + - npm install + - npm run test +print_error_message: + image: alpine:latest + title: Marking pipeline status + commands: + - echo "Unit tests failed" + when: + condition: + all: + myCondition: run_tests.result == 'failure' +{% endraw %} +{% endhighlight %} + +In this example the step `print_error_message` will only run if step `run_tests` has failed. 
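If you only need to record the failure rather than run an extra step, the result-aware operations shown earlier can do this directly on the failing step. Here is a hedged variant of the metadata example, reusing the `build_step` from the first example and marking the image when the tests fail:

{% highlight yaml %}
{% raw %}
run_tests:
  title: Running unit tests
  image: ${{build_step}}
  commands:
    - npm install
    - npm run test
  on_fail: # Execute only once the step has failed
    metadata:
      set:
        - ${{build_step.imageId}}:
          - unit_tests: failed
{% endraw %}
{% endhighlight %}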
+ +See also [advanced workflows]({{site.baseurl}}/docs/codefresh-yaml/advanced-workflows/#single-step-dependencies) and [Pipeline/Step hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/). + +## Related articles +[Conditional execution of steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) +[Working directories]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Annotations in CI pipelines]({{site.baseurl}}/docs/pipelines/annotations/) +[Hooks in CI pipelines]({{site.baseurl}}/docs/pipelines/hooks/) + + diff --git a/_docs/pipelines/running-pipelines-locally.md b/_docs/pipelines/running-pipelines-locally.md new file mode 100644 index 000000000..fbd0ba6ad --- /dev/null +++ b/_docs/pipelines/running-pipelines-locally.md @@ -0,0 +1,124 @@ +--- +title: "Running pipelines locally" +description: "How to run Codefresh pipelines on your workstation" +group: pipelines +toc: true +redirect_from: + - /docs/troubleshooting/common-issues/debugging-codefresh-builds-locally/ + - /docs/troubleshooting/common-issues/access-and-debug-the-pipeline-volume-image/ +--- + +Codefresh can run your pipelines locally. This is very handy when you need to debug a pipeline, or when you want to do quick changes to the [codefresh.yml file]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) with the fastest turn-around time possible. + +## Prerequisites + +You need to have Docker installed on your local workstation. You can follow the [official instructions](https://docs.docker.com/install/) to install it. Notice that if you use Linux, the Docker version offered by your native +package manager is not always the latest version. + +Once docker is installed, check that it runs correctly with: + +``` +docker run hello-world +``` + +You should get a short welcome message. + +>At the time of writing local builds can only run on Linux and Mac workstations. We are working to remove this limitation and allow developers with Windows machines to also run Codefresh pipelines locally. + +Then install the [open-source Codefresh CLI](https://codefresh-io.github.io/cli/installation/) and [setup authentication](https://codefresh-io.github.io/cli/getting-started/) with your Codefresh account. + +Once this is done check that your account is locally accessible by running + +``` +codefresh get pipelines +``` + +You should see a long list with your pipelines on the terminal output. + +## Running a pipeline locally + +The Codefresh Command Line Interface (CLI) comes with a [run parameter](https://codefresh-io.github.io/cli/pipelines/run-pipeline/) that allows you to trigger pipelines externally (outside the Codefresh UI). + +Normally, if you run a pipeline this way the CLI will just trigger it remotely (the pipeline itself will still run in the Codefresh infrastructure). + +You can pass however the `--local` option, and this will instruct the CLI to automatically: + +1. Download the Codefresh build engine locally to your workstation (which itself is a docker image at [codefresh/engine](https://hub.docker.com/r/codefresh/engine)) +1. Run the build locally using the Codefresh engine on your workstation +1. Print all build logs to your terminal + +Note that the engine has transparent network access to all the other settings in your Codefresh account and therefore will work exactly the same way as if it was run on Codefresh infrastructure (e.g. 
use the connected Docker registries you have setup in the UI) + +Here is a full example: + +``` +codefresh run francisco-codefresh/jan_19/my-basic-pipeline --local -b master -t my-trigger +``` + + + +### Keeping the pipeline volume in the local workstation + +If you are familiar with +[how Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) you should know about the unique docker volume that is automatically shared between all pipeline steps. + +This volume (which also includes the project folder) makes data sharing between all steps very easy (e.g. with thing such as test reports or binary dependencies). + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume.png" +url="/images/pipeline/introduction/codefresh-volume.png" +alt="Codefresh volume" +caption="All steps share the same volume" +max-width="80%" +%} + +By default, if you run a Codefresh pipeline locally, this shared volume will automatically be discarded at the end of the build. You can still keep the volume after the build by adding the `--local-volume` parameter in your [run command](https://codefresh-io.github.io/cli/pipelines/run-pipeline/). Here is an example: + +``` +codefresh run francisco-codefresh/jan_19/my-basic-pipeline --local --local-volume -b master -t my-trigger +``` + + +Once the build runs you will see in your terminal the path that holds the contents of the volume: + +``` +[...build logs...] +Using /Users/fcocozza/.Codefresh/francisco-codefresh/jan_19/my-basic-pipeline as a local volume. +[...more build logs] +``` + +After the build has finished you can freely explore this folder in your filesystem with any file manager. + +``` +$ ls -alh /Users/fcocozza/.Codefresh/francisco-codefresh/jan_19/my-basic-pipeline/ +total 16 +drwxr-xr-x 5 fcocozza staff 160B Jan 14 12:52 . +drwxr-xr-x 3 fcocozza staff 96B Jan 14 12:52 .. +-rwxr-xr-x 1 fcocozza staff 388B Jan 14 12:52 cf_export +-rw-rw-r-- 1 fcocozza staff 189B Jan 14 12:52 env_vars_to_export +drwxr-xr-x 5 fcocozza staff 160B Jan 14 12:52 jan_19 +``` +This way you can verify if the pipeline has access to the data you think it should have + + +### Using a custom codefresh.yml file + +The ultimate way to run a pipeline locally is to override completely the `codefresh.yml` file it uses. A pipeline by default will read its steps from the respective file in git. + +You can force it to ignore that git version of the pipeline spec and instead load a custom `codefresh.yml` from your local file-system (which might not be even committed yet). + +The extra parameter is `--yaml` in that case. +Here is a complete example + +``` +codefresh run francisco-codefresh/jan_19/my-basic-pipeline --local --local-volume --yaml=my-codefresh.yml -b master -t my-trigger +``` + +When this pipeline runs locally, it will use whatever steps exist in `my-codefresh.yml` instead of the git version. The shared data volume will also be left intact after the build is finished as explained in the previous section. 
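For reference, the file passed with `--yaml` is just a regular pipeline definition. A minimal sketch (the file name and step contents are arbitrary) that you could place in `my-codefresh.yml` while experimenting locally:

 `my-codefresh.yml`
{% highlight yaml %}
{% raw %}
version: '1.0'
steps:
  say_hello:
    title: Checking the local override
    image: alpine:3.10.3
    commands:
      - echo "This step comes from the local my-codefresh.yml and not from git"
      - ls /codefresh/volume
{% endraw %}
{% endhighlight %}

Running the earlier command with `--yaml=my-codefresh.yml` executes only this step, regardless of what is committed in git.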
+ +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) diff --git a/_docs/pipelines/service-containers.md b/_docs/pipelines/service-containers.md new file mode 100644 index 000000000..a60222153 --- /dev/null +++ b/_docs/pipelines/service-containers.md @@ -0,0 +1,570 @@ +--- +title: "Service containers in pipelines" +description: "How to use sidecar services in your pipelines" +group: pipelines +toc: true +--- + +Sometimes you wish to run sidecar containers in a pipeline that offer additional services for your builds. The most common scenario is launching services such as databases in order to accommodate [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). Or you might wish to launch the application itself in order to run integration tests **against** it as part of the pipeline. + +>Note that while [composition steps]({{site.baseurl}}/docs/codefresh-yaml/steps/composition/) are still supported, the recommended way to run integrations tests going forward is with service containers. The underlying implementation is shared so check the composition documentation page for more available options +and properties. + +Codefresh includes a handy mechanism (based on Docker compose) that can help you run sidecar containers along your main pipeline. Here is a very simple example. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +services: + name: my_database + composition: + my-redis-db-host: + image: redis:latest + ports: + - 6379 +steps: + my_integration_tests: + image: my-app-image + title: Running integration tests + commands: + - npm run test + services: + - my_database +{% endraw %} +{% endhighlight %} + +This pipeline will run integration tests during the freestyle step called `my_integration_tests` and at that point a Redis instance will be available at hostname `my-redis-db-host` and port 6379. Note how in this example, the service container is placed at the root of the pipeline (as opposed to inside a specific step). This ensures that the Redis instance is running for [the duration of the pipeline]({{site.baseurl}}/docs/codefresh-yaml/service-containers/#running-services-for-the-duration-of-the-pipeline). + +>Service Containers are based on Docker Compose. This document does not have the complete list of available options available. Please refer to Docker Compose versions [2](https://docs.docker.com/compose/compose-file/compose-file-v2/) and [3](https://docs.docker.com/compose/compose-file/), but not point releases such as 2.1. + + +## Viewing Service containers + +The service containers have their own output tab in Codefresh UI + +{% include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/services/services-tab.png" + url="/images//pipeline/codefresh-yaml/services/services-tab.png" + alt="Output tab from extra services" + caption="Output tab from extra services" + max-width="100%" + %} + +This way it is very easy to differentiate between the output logs of the step itself and its supporting container services. + + +## Launching multiple sidecar containers + +Like Docker compose it is possible to launch multiple services this way. For example, let's say that a Java application needs both Redis and MongoDB during integration tests. 
Here is the respective pipeline:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+services:
+  name: my_extra_services
+  composition:
+    my-redis-db-host:
+      image: redis:latest
+      ports:
+        - 6379
+    my-mongo-db-host:
+      image: mongo:latest
+      ports:
+        - 27017
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-java-app"
+    git: github
+    revision: "master"
+  my_tests:
+    image: maven:3.5.2-jdk-8-alpine
+    title: "Running Integration tests"
+    commands:
+      - 'mvn integration-test'
+{% endraw %}
+{% endhighlight %}
+
+The Redis instance will be available through the network at `my-redis-db-host:6379`, while the MongoDB instance will run at `my-mongo-db-host:27017`.
+
+Instead of mentioning all your services directly in the YAML file, you might also reuse an existing composition you have already defined in Codefresh by mentioning it by name.
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+services:
+  name: my_extra_services
+  composition: redis_and_mongo
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-java-app"
+    revision: "master"
+    git: github
+  my_tests:
+    image: maven:3.5.2-jdk-8-alpine
+    title: "Unit tests"
+    commands:
+      - 'mvn integration-test'
+{% endraw %}
+{% endhighlight %}
+
+This pipeline mentions an existing composition called `redis_and_mongo`:
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/existing-composition.png"
+url="/images/pipeline/codefresh-yaml/existing-composition.png"
+alt="Using an existing composition"
+caption="Using an existing composition"
+max-width="70%"
+%}
+
+This makes it very easy to reuse compositions that you have already defined for other reasons [in the Codefresh UI](https://codefresh.io/docs/docs/testing/create-composition/).
+
+
+## Running services for the duration of the pipeline
+
+Notice that unlike compositions, the services defined in the root of the pipeline yaml are present for the **whole** pipeline duration. They are available in all pipeline steps. This can be seen in the following example:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+services:
+  name: my_database
+  composition:
+    my-redis-db-host:
+      image: redis:latest
+      ports:
+        - 6379
+steps:
+  my_first_step:
+    image: alpine:latest
+    title: Storing Redis data
+    commands:
+      - apk --update add redis
+      - redis-cli -u redis://my-redis-db-host:6379 -n 0 LPUSH mylist "hello world"
+      - echo finished
+    services:
+      - my_database
+  my_second_step:
+    image: alpine:latest
+    commands:
+      - echo "Another step in the middle of the pipeline"
+  my_third_step:
+    image: alpine:latest
+    title: Reading Redis data
+    commands:
+      - apk --update add redis
+      - redis-cli -u redis://my-redis-db-host:6379 -n 0 LPOP mylist
+    services:
+      - my_database
+{% endraw %}
+{% endhighlight %}
+
+This pipeline:
+
+1. Starts a single Redis instance
+1. Saves some data in the first step of the pipeline
+1. Runs an unrelated step (that itself is not using the Redis instance)
+1. Reads the previously saved data in the third step
+
+If you run this pipeline you will see that the data read in the third step of the pipeline is the same as the data saved in the first step.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/redis-example.png"
+url="/images/pipeline/codefresh-yaml/redis-example.png"
+alt="Redis read/write example"
+caption="Redis read/write example"
+max-width="90%"
+%}
+
+This means that you can easily use the extra services in different steps of a single pipeline, without relaunching them each time (which is what happens with composition steps).
+
+## Using sidecar services in specific steps
+
+It is important to understand that any services you launch in a pipeline share its memory. If, for example, your pipeline has 4GB of memory and your service (e.g. a MongoDB instance) consumes 1GB, then you only have 3GB available for the actual pipeline.
+
+It is therefore possible to assign a service to a specific step if you don't wish to have it running for the duration of the whole pipeline:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-java-example"
+    revision: "master"
+    git: github
+  build_image:
+    title: "Building Docker Image"
+    type: "build"
+    image_name: "my-java-app"
+    dockerfile: "Dockerfile"
+    tag: latest
+  my_unit_tests:
+    image: '${{build_image}}'
+    title: "Unit tests"
+    commands:
+      - 'echo start testing my app'
+    services:
+      composition:
+        my_redis_service:
+          image: 'redis:latest'
+          ports:
+            - 6379
+  my_integration_tests:
+    image: '${{build_image}}'
+    title: "Integration tests"
+    commands:
+      - 'echo start testing my app'
+    services:
+      composition:
+        my_mongo_Service:
+          image: 'mongo:latest'
+          ports:
+            - 27017
+{% endraw %}
+{% endhighlight %}
+
+In this pipeline, the Redis instance is only launched during the unit test step, while the MongoDB service is active only during the integration tests.
+
+You can also use a `docker-compose.yml` file that you might have in your git repository:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-java-example"
+    revision: "master"
+    git: github
+  build_image:
+    title: "Building Docker Image"
+    type: "build"
+    image_name: "my-java-app"
+    dockerfile: "Dockerfile"
+    tag: latest
+  my_unit_tests:
+    image: '${{build_image}}'
+    title: "Unit tests"
+    commands:
+      - 'echo start testing my app'
+    services:
+      composition:
+        my_redis_service:
+          image: 'redis:latest'
+          ports:
+            - 6379
+  my_integration_tests:
+    image: '${{build_image}}'
+    title: "Integration tests"
+    commands:
+      - 'echo start testing my app'
+    services:
+      composition: 'docker-compose.yml'
+{% endraw %}
+{% endhighlight %}
+
+Note that in this case the `docker-compose.yml` file must mention [specific images](https://docs.docker.com/compose/compose-file/#image) (and not use [build properties](https://docs.docker.com/compose/compose-file/#build)).
+
+
+## Launching a custom service
+
+So far all the examples of extra services used predefined docker images (i.e. Redis and Mongo). You are free, however, to launch any custom docker image you have already created, or even the main application of the pipeline.
+
+This happens by mentioning a build step as a service image. Here is an example:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+ repo: "kostis-codefresh/my-back-end" + revision: "master" + git: github + build_image: + title: "Building Docker Image" + type: "build" + image_name: "my-backend-app" + tag: latest + dockerfile: "Dockerfile" + run_integration_tests: + title: Test backend + image: 'my-front-end:latest' + commands: + - 'curl my_backend_app:8080' + - 'echo Backend is up. Starting tests' + - npm run integration-test + services: + composition: + my_backend_app: + image: '${{build_image}}' + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +Here a Dockerfile for a backend application is built on the spot and then is launched as sidecar container in the next step (with a hostname of `my_backend_app`). Notice that the `image` property in the sidecar service actually refers to a [Codefresh variable]({{site.baseurl}}/docs/codefresh-yaml/variables/) that holds the name of the build step. + +We then run a `curl` command against the sidecar container to verify the correct health of the application. This is a great way to run integration tests against multiple micro-services. + + +## Checking readiness of a service + +When you launch multiple services in your pipelines, you don't know exactly when they will start. Maybe they will be ready once you expect them, but maybe they take too long to start. For example if you use a MySQL database in your integration tests, your integration tests need to know that the database is actually up before trying to use it. + +This is the same issue that is present in [vanilla Docker compose](https://docs.docker.com/compose/startup-order/). You can use solutions such as [wait-for-it](https://github.com/vishnubob/wait-for-it) to overcome this limitation, but Codefresh offers a better way in the form of *service readiness*. + +With a readiness block you can guarantee that a sidecar service will be actually up before the pipeline will continue. Here is an example: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-back-end" + revision: "master" + git: github + build_image: + title: "Building Docker Image" + type: "build" + image_name: "my-backend-app" + tag: latest + dockerfile: "Dockerfile" + run_integration_tests: + title: Test backend + image: 'my-front-end:latest' + commands: + # Backend is certainly up at this point. + - npm run integration-test + services: + composition: + my_backend_app: + image: '${{build_image}}' + ports: + - 8080 + readiness: + image: 'byrnedo/alpine-curl' + timeoutSeconds: 30 + commands: + - "curl my_backend_app:8080" +{% endraw %} +{% endhighlight %} + + +This is an improvement over the previous example because the healthcheck of the back-end is managed by Codefresh. The added `readiness` block makes sure that the back-end service is actually up before the integration tests start by using a `curl` command to check that `my_backend_app:8080` is up and running. Codefresh will run the commands defined in the `readiness` in a loop until they succeed. You are free to use any of your favorite commands there (ping, curl, nc etc) that check one or more services. We also define a timeout for the healthcheck. The `readiness` block supports the following options: + +* `periodSeconds`: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +* `timeoutSeconds`: Number of seconds after which the probe times out. Defaults to 10 seconds. Minimum value is 1. 
+* `successThreshold`: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for readiness. Minimum value is 1.
+* `failureThreshold`: Number of times to retry the probe before giving up. In the case of a readiness probe, the Pod will be marked Unready. Defaults to 3. Minimum value is 1.
+
+If you already know how [Kubernetes readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) work, then these settings will be very familiar to you.
+
+Here is another example where we use the `pg_isready` command to make sure that a PostgreSQL database is ready to accept connections
+before we run the integration tests.
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-rails-app"
+    revision: "master"
+    git: github
+  build_image:
+    title: "Building Docker Image"
+    type: "build"
+    image_name: "my-rails-app"
+    tag: "latest"
+    dockerfile: "Dockerfile"
+  run_integration_tests:
+    image: '${{build_image}}'
+    commands:
+      # PostgreSQL is certainly up at this point
+      - rails db:migrate
+      - rails test
+    services:
+      composition:
+        my_postgresql_db:
+          image: postgres:latest
+          ports:
+            - 5432
+      readiness:
+        timeoutSeconds: 30
+        periodSeconds: 15
+        image: 'postgres:latest'
+        commands:
+          - "pg_isready -h my_postgresql_db"
+{% endraw %}
+{% endhighlight %}
+
+In summary, `readiness` makes sure that your services are actually up before you use them in a Codefresh pipeline.
+
+## Preloading data to databases
+
+A very common scenario when using databases in integration tests is the need to preload some test data in the database.
+While you could do that in a normal pipeline step, sidecar services have a special `setup` block for this purpose. This way you can make sure not only that the database is up (using the `readiness` property explained in the previous section), but also that it is preloaded with the correct data.
+
+To use this capability add a `setup` block in your pipeline service container:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-rails-app"
+    revision: "master"
+    git: github
+  build_image:
+    title: "Building Docker Image"
+    type: "build"
+    image_name: "my-rails-app"
+    tag: "latest"
+    dockerfile: "Dockerfile"
+  run_integration_tests:
+    image: '${{build_image}}'
+    commands:
+      # PostgreSQL is certainly up at this point and has the correct data
+      - rails test
+    services:
+      composition:
+        my_postgresql_db:
+          image: postgres:latest
+          ports:
+            - 5432
+      readiness:
+        timeoutSeconds: 30
+        periodSeconds: 15
+        image: 'postgres:latest'
+        commands:
+          - "pg_isready -h my_postgresql_db"
+      setup:
+        image: 'postgres:latest'
+        commands:
+          - "wget my-staging-server.example.com/testdata/preload.sql"
+          - "psql -h my_postgresql_db < preload.sql"
+{% endraw %}
+{% endhighlight %}
+
+Notice that in this case the sequence of events is the following:
+
+1. Codefresh will launch the container image(s) mentioned in the composition block
+1. The `readiness` block will run until the service image is ready to accept connections
+1. The `setup` block will run and preload data or run any custom commands you have placed in the property
+1. The actual pipeline step will now run with the service container attached in the same network.
+ +## Accessing containers via localhost + +Ideally, your application should be able to access other services by other DNS names that are fully configurable (this is a very good practice for [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) as well). + +Sometimes, however, and especially in legacy applications, your application might be hardcoded to look at other services at `localhost`. +In that case, you can use the attribute `shared_host_network: true` on the services definition. Now all linked containers can access each other's services via localhost. +When `composition: ./docker-compose.yml` is used, this parameter is supported only in on-premises and hybrid environments. In cloud environments, for security reasons, this parameter is ignored. + +Here is an example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + image: goodsmileduck/redis-cli + title: Storing Redis data + commands: + - apk add curl + - 'redis-cli -u redis://localhost:6379 -n 0 LPUSH mylist "hello world"' + - 'curl http://localhost:80' + - echo finished + services: + shared_host_network: true + composition: + my_redis_service: + image: 'redis:latest' + my_nginx: + image: nginx +{% endraw %} +{% endhighlight %} + +You can also do the same thing with top level services: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +services: + name: my_database + shared_host_network: true + composition: + my_redis_service: + image: 'redis:latest' + my_nginx: + image: nginx +steps: + my_first_step: + image: goodsmileduck/redis-cli + title: Storing Redis data + commands: + - apk add curl + - 'redis-cli -u redis://localhost:6379 -n 0 LPUSH mylist "hello world"' + - 'curl http://localhost:80' + - echo finished + services: + - my_database +{% endraw %} +{% endhighlight %} + +Note: we do recommend you only use this option as a last resort. You should not hardcode "localhost" as a requirement in your services as +it adds extra constraints with integration tests (and especially with dynamic test environments). + + +## Limitations + +Service containers are not compatible with [custom pipeline steps]({{site.baseurl}}/docs/codefresh-yaml/steps/#limitations-of-custom-plugins). + + + + +## Related articles +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Integration test with database]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-database/) +[Creating Compositions]({{site.baseurl}}/docs/on-demand-test-environment/create-composition/) + + + + + + + + diff --git a/_docs/pipelines/stages.md b/_docs/pipelines/stages.md new file mode 100644 index 000000000..5ce2d7001 --- /dev/null +++ b/_docs/pipelines/stages.md @@ -0,0 +1,195 @@ +--- +title: "Grouping steps in pipelines" +description: "Group steps into stages for better visualization" +group: pipelines +toc: true +--- + +With Codefresh you can [create really complex pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) with any number of steps. + +To better visualize the pipeline, you can group several steps into a single _stage_. The _stage_ with the group of steps are displayed as a separate column in the [pipeline view]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/). 
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+url="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+alt="Complex pipeline"
+caption="Complex pipeline"
+max-width="70%"
+%}
+
+In this example, the pipeline has four stages.
+
+## Assigning steps to a stage
+
+Stages are completely optional, and for really small pipelines they are not needed at all.
+By default, all pipeline steps are shown one after the other.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/linear-view.png"
+url="/images/pipeline/codefresh-yaml/stages/linear-view.png"
+alt="Default pipeline view"
+caption="Default pipeline view"
+max-width="50%"
+%}
+
+This view works well for small pipelines, but for a large number of steps it is better to group them into pipeline *stages*, as shown below:
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/example.png"
+url="/images/pipeline/codefresh-yaml/stages/example.png"
+alt="Different pipeline stages"
+caption="Different pipeline stages"
+max-width="80%"
+%}
+
+The number of stages (i.e. columns) and their titles are completely configurable.
+To enable this view, you need to make two modifications to the `codefresh.yml` file.
+
+Here is the skeleton:
+
+ `codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+stages:
+  - [stage-name-1]
+  - [stage-name-2]
+
+steps:
+  step-name:
+    [step-contents]
+    stage: [name-of-stage]
+  another-step:
+    [step-contents]
+    stage: [name-of-stage]
+  the-very-last-step:
+    [step-contents]
+    stage: [name-of-stage]
+{% endhighlight %}
+
+As you can see, the modifications needed are:
+
+1. To list all the stage names at the root of the pipeline file
+1. To use the `stage` property on each step to assign it to a stage
+
+>This updated pipeline view affects only the visualization of the pipeline. It does not affect the order of step execution. Steps are still executed in the same order as listed in the `codefresh.yml` file.
+ If you wish to use parallel execution and advanced workflows, see the [parallel steps]({{site.baseurl}}/docs/codefresh-yaml/advanced-workflows/) page.
+ + +## Example pipeline with several stages + +Here is a more concrete example that you can use as a starting point: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build + - scan + - integration + - deploy +steps: + step1: + stage: 'prepare' + image: node + commands: + - 'echo "Hello Step 1!"' + step2: + image: node + stage: 'prepare' + commands: + - 'echo "Hello Step 2!"' + step3: + image: node + stage: 'test' + commands: + - 'echo "Hello Step 3!"' + step4: + image: node + stage: 'build' + commands: + - 'echo "Hello Step 4!"' + step5: + image: node + stage: 'scan' + commands: + - 'echo "Hello Step 5!"' + step6: + image: node + stage: 'scan' + commands: + - 'echo "Hello Step 6!"' + step7: + image: node + stage: 'integration' + commands: + - 'echo "Hello Step 7!"' + step8: + image: node + stage: 'deploy' + commands: + - 'echo "Hello Step 8!"' + step9: + image: node + stage: 'deploy' + commands: + - 'echo "Hello Step 9!"' +{% endraw %} +{% endhighlight %} + +If you run the pipeline you will see this view + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/stages/complex.png" +url="/images/pipeline/codefresh-yaml/stages/complex.png" +alt="Complex Pipeline view" +caption="Complex Pipeline view" +max-width="80%" +%} + +Remember that the assignment of a step to a stage is happening only for graphical grouping purposes. It does +not affect the way your steps run. All steps will still run in the same order mentioned in the `codefresh.yml` file. + +Also notice if you enable this view a stage called *default* will show all build steps that are not explicitly assigned to a stage. + +## Using spaces in stage names + +If you wish to have spaces in stage names you need to quote them like this: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my test phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app + dockerfile: Dockerfile + MyUnitTests: + title: Unit testing + stage: 'my test phase' + image: ${{MyAppDockerImage}} + commands: + - npm run test +{% endraw %} +{% endhighlight %} + + +## Related articles +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Parallel workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/pipelines/steps.md b/_docs/pipelines/steps.md new file mode 100644 index 000000000..6d32c11fc --- /dev/null +++ b/_docs/pipelines/steps.md @@ -0,0 +1,1226 @@ +--- +title: "Steps in pipelines" +description: "Types of steps in Codefresh pipelines" +group: pipelines +redirect_from: + - /docs/steps/ +toc: true +--- + +Codefresh [pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) are composed of a series of steps. + +You can create your own pipelines by writing a [codefresh.yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) file that describes your pipeline. This file can then be version controlled on its own (pipeline as code). 
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+url="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+alt="Pipeline steps"
+caption="Pipeline steps"
+max-width="80%"
+%}
+
+
+
+## Built-in step types
+
+The steps offered by Codefresh are the following (a short example combining some of them is shown after the list):
+
+* [Git clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/)
+  **Git clone** steps allow you to check out code in your pipeline from any internal or external repository. Existing accounts that still use repositories instead of [projects]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-concepts) have an implicit clone step in their pipelines.
+
+* [Freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/)
+  **Freestyle** steps are the cornerstone of Codefresh pipelines. They allow you to run any command within the context of a Docker container. A lot of Codefresh optimizations such as the [shared docker volume]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) are designed specifically for freestyle steps.
+Freestyle steps are a secure replacement for `docker run` commands.
+
+* [Build]({{site.baseurl}}/docs/pipelines/steps/build/)
+  **Build** steps are the main way to get access to the Docker daemon (Docker as a service) in Codefresh pipelines. Build steps take as input any Dockerfile and run it on the cloud in a similar manner to what you do on your workstation. Build steps automatically push the result to the default Docker registry of your account (no need for docker login commands). Codefresh also comes with a global Docker cache that automatically gets attached to all build nodes. Build steps are a secure replacement for `docker build` commands.
+
+* [Push]({{site.baseurl}}/docs/pipelines/steps/push/)
+**Push** steps allow you to push and tag your docker images (created by the build step) in any [external Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). Push steps are *not* needed at all if you work with only the internal Codefresh registry. Push steps are a secure replacement for the `docker tag` and `docker push` commands.
+
+* [Composition]({{site.baseurl}}/docs/pipelines/steps/composition/)
+  **Composition** steps allow you to run multiple services together in the Codefresh infrastructure and execute unit tests or other commands against them. They are discarded once a pipeline finishes. Composition steps are a secure replacement for `docker-compose` definitions.
+
+* [Launch test environment]({{site.baseurl}}/docs/pipelines/steps/launch-composition/)
+  **Launch test environment** steps behave similarly to compositions, but they persist after the pipeline ends. This is a great way to create preview environments from your pull requests and send them to colleagues.
+
+* [Deploy]({{site.baseurl}}/docs/pipelines/steps/deploy/)
+  **Deploy steps** allow you to [perform Kubernetes deployments]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) in a declarative manner. They embody the Continuous Deployment aspect of Codefresh.
+
+* [Approval]({{site.baseurl}}/docs/pipelines/steps/approval/)
+  **Approval steps** allow you to pause pipelines and wait for human intervention before resuming. They allow you to embrace the concepts of Continuous Delivery.
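+
+To give a feel for how these step types combine, here is a minimal sketch of a pipeline that clones a repository, builds a Docker image and then waits for a manual approval (the repository and image names are placeholders):
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  main_clone:
+    type: git-clone
+    title: Cloning main repository...
+    repo: 'my-github-user/my-sample-app'
+    revision: 'master'
+    git: github
+  build_image:
+    type: build
+    title: Building Docker image
+    image_name: my-sample-app
+    dockerfile: Dockerfile
+    tag: latest
+  wait_for_approval:
+    type: pending-approval
+    title: Waiting for manual approval
+{% endraw %}
+{% endhighlight %}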
+
+
+
+>Codefresh also supports [parallel workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/), as well as running pipelines [locally on your workstation]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/).
+
+## Step directory
+
+In the case of freestyle steps, we also offer a [plugin marketplace](https://codefresh.io/steps/) with several existing plugins for popular integrations.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/plugin-directory.png"
+url="/images/pipeline/plugin-directory.png"
+alt="Codefresh steps directory"
+caption="Codefresh steps directory"
+max-width="80%"
+%}
+
+Codefresh steps can be:
+
+* Private (visible only to you and your team) or public (visible to everybody via the marketplace)
+* Official (supported by the Codefresh team) or community based
+* Ready for production or still incubating.
+
+In your pipelines you can use any of the public steps already in the marketplace, any steps created by your team, and any steps that you create for yourself.
+
+## Using custom pipeline steps
+
+When you create a pipeline, you will have access to two categories of steps:
+
+* Public steps that exist in the marketplace
+* Steps that you or your team have created (visible only to you)
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/choose-step.png"
+url="/images/pipeline/codefresh-yaml/steps/choose-step.png"
+alt="Choosing a custom step"
+caption="Choosing a custom step"
+max-width="60%"
+%}
+
+To use a step, first click on the pipeline section where you want to insert the step.
+You will get a new dialog with all the details of the step along with a live preview of the exact
+[yaml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) that will be inserted in your pipeline.
+
+For all steps you can define:
+
+* The title of the step (which will also be visible in the pipeline UI)
+* A freetext description
+* The [stage]({{site.baseurl}}/docs/pipelines/stages/) that will contain the step
+
+The rest of the fields are specific to each step. See the documentation of each step in order to understand what each field should contain. There are fields for each step that are marked as required and are essential for the step to work. These are marked with an asterisk.
+
+Once a step is added to the pipeline, you are free to change the resulting yaml even further by just typing in the pipeline editor.
+
+## Creating your own step
+
+There are two ways to create custom steps in Codefresh. The simplest way is to package an existing CLI tool into a Docker image and use it as a freestyle step. The more advanced way is creating a typed step with explicit input and output parameters.
+
+Here is a summary of the two ways:
+
+{: .table .table-bordered .table-hover}
+| | Custom freestyle step | Codefresh typed plugin |
+| -------------- | ---------------------------- |-------------------------|
+| Assets needed | A Docker image | A Docker image and a plugin manifest |
+| Knowledge required | Docker building/pushing | Docker and Codefresh CLI |
+| Step can be used | In any Docker based CI/CD platform | In Codefresh |
+| Effort required | Minimal | Medium |
+| Distribution via | Dockerhub | Codefresh marketplace |
+| Input variables | Yes | Yes |
+| Output variables | No | Yes |
+| Versioning via | Docker tags | Manifest entry |
+| Grouping of multiple steps | No | Yes |
+| Marketplace entry | Not possible | Possible/optional |
+| Best for sharing steps | with your team/company | with the world |
+
+
+
+We suggest that you start with custom freestyle steps first and only create typed plugins once you are familiar with Codefresh pipelines or want your plugin to appear in the marketplace.
+
+
+### Creating a custom freestyle step
+
+As an example, let's say that you need to use the [JFrog CLI](https://jfrog.com/getcli/) in a pipeline in order to interact with Artifactory or Bintray. JFrog does not offer any Docker image that contains the CLI, and you already know that all Codefresh steps [are actually Docker images]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/).
+
+Therefore you can easily package the CLI into a Docker image and then make it available to any Codefresh pipeline that wishes to use it.
+First you create [a Dockerfile](https://github.com/kostis-codefresh/step-examples/blob/master/jfrog-cli-wrapper/Dockerfile) that packages the CLI:
+
+ `Dockerfile`
+{% highlight docker %}
+{% raw %}
+FROM debian:stable-slim
+
+WORKDIR /jfrog-cli
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
+RUN curl -fL https://getcli.jfrog.io | sh
+
+ENV JFROG_CLI_OFFER_CONFIG false
+ENV BINTRAY_LICENCES MIT
+
+RUN /jfrog-cli/jfrog bt config --licenses $BINTRAY_LICENCES
+
+RUN ln -s /jfrog-cli/jfrog /usr/local/bin/jfrog
+
+CMD ["/jfrog-cli/jfrog"]
+{% endraw %}
+{% endhighlight %}
+
+This is a standard Dockerfile. There is nothing specific to Codefresh in the image that gets created. You can test this Dockerfile locally with:
+
+{% highlight shell %}
+{% raw %}
+docker build . -t jfrog-cli
+docker run jfrog-cli
+{% endraw %}
+{% endhighlight %}
+
+In a similar manner you can package any other executable and its dependencies. You could even just package `curl` with an external URL that hosts the service that you want to interact with in a Codefresh pipeline.
+
+Once the Dockerfile is ready, you need to push it to Dockerhub. You can do this manually from your workstation, but it is best if you create a [Codefresh pipeline](https://github.com/kostis-codefresh/step-examples/blob/master/jfrog-cli-wrapper/codefresh.yml) that does it for you.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/create-custom-step.png"
+url="/images/pipeline/codefresh-yaml/steps/create-custom-step.png"
+alt="Creating a custom freestyle step"
+caption="Creating a custom freestyle step"
+max-width="80%"
+%}
+
+Now that the image is ready and public, you can notify your team that the new plugin is ready.
+Everybody who wants to interact with JFrog Bintray and/or Artifactory can place [the following snippet](https://github.com/kostis-codefresh/step-examples/blob/master/jfrog-cli-wrapper/codefresh-example.yml) in a pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + run_frog_cli: + title: Running jfrog CLI inside Docker + image: kkapelon/jfrog-cli + commands: + - jfrog bt --help + - jfrog rt --help +{% endraw %} +{% endhighlight %} + +You can then customize the exact command(s) that you want to run with the tool. All capabilities of freestyle steps are possible, such as passing environment variables as input parameters. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + run_frog_cli: + title: Running jfrog CLI inside Docker + image: kkapelon/jfrog-cli + commands: + - jfrog bt package-show google/tensorflow/tensorflow + environment: + - BINTRAY_USER=my-user + - BINTRAY_KEY=my-secret-key +{% endraw %} +{% endhighlight %} + +If you want to use multiple versions of the step in the same pipeline, you can just create different docker tags. Notice that you can also use a [private registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) instead of Dockerhub if you wish your step to be used only within your organization. + + + +### Creating a typed Codefresh plugin + +You can use the [Codefresh CLI](https://codefresh-io.github.io/cli/) and more specifically the [step-type resource](https://codefresh-io.github.io/cli/steps/) to create your own typed step. Each Codefresh step is composed from two parts: + +1. The step description in the special yaml syntax for describing Codefresh steps +1. A Docker image that implements the step (optional) + +The easiest way to create your own step is to start by using the definition of an existing step. + +{% highlight bash %} +codefresh get step-type vault -o yaml > vault-step.yml +{% endhighlight %} + +Here is the resulting yaml: + + `vault-step.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: / + isPublic: false + description: >- + The plugin exports KV pairs from Hashicorp Vault to Codefresh pipeline ENV + variables + sources: + - 'https://github.com/codefresh-io/steps/tree/master/incubating/vault' + stage: incubating + maintainers: + - name: Alexander Aladov + categories: + - featured + official: false + tags: [] + icon: + type: svg + url: 'https://cdn.jsdelivr.net/gh/codefresh-io/steps/incubating/vault/icon.svg' + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + Vault_to_Env: + title: Importing vault values + type: vault + arguments: + VAULT_ADDR: '${{VAULT_ADDR}}' + VAULT_PATH: '${{VAULT_PATH}}' + VAULT_AUTH_TOKEN: '${{VAULT_AUTH_TOKEN}}' + VAULT_CLIENT_CERT_BASE64: '${{VAULT_CLIENT_CERT_BASE64}}' + VAULT_CLIENT_KEY_BASE64: '${{VAULT_CLIENT_KEY_BASE64}}' + created_at: '2019-07-03T14:57:02.057Z' + updated_at: '2019-09-18T08:15:28.476Z' + latest: true + version: 0.0.1 + id: 5d1cc23ea7e22e40227ea75d +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "VAULT_ADDR", + "VAULT_PATH", + "VAULT_AUTH_TOKEN" + ], + "properties": { + "VAULT_ADDR": { + "type": "string", + "description": "Vault server URI. Example: https://vault.testdomain.io:8200 (required)" + }, + "VAULT_PATH": { + "type": "string", + "description": "Path to secrets in vault. 
Example: secret/codefreshsecret (required)"
+      },
+      "VAULT_AUTH_TOKEN": {
+        "type": "string",
+        "description": "Vault authentication token (required)"
+      },
+      "VAULT_CLIENT_CERT_BASE64": {
+        "type": "string",
+        "description": "Base64 encoded client certificate"
+      },
+      "VAULT_CLIENT_KEY_BASE64": {
+        "type": "string",
+        "description": "Base64 encoded client key"
+      }
+    }
+  }
+  steps:
+    main:
+      name: vault
+      image: codefreshplugins/vault
+      environment:
+        - 'VAULT_ADDR=${{VAULT_ADDR}}'
+        - 'VAULT_PATH=${{VAULT_PATH}}'
+        - 'VAULT_AUTH_TOKEN=${{VAULT_AUTH_TOKEN}}'
+        - 'VAULT_CLIENT_CERT_BASE64=${{VAULT_CLIENT_CERT_BASE64}}'
+        - 'VAULT_CLIENT_KEY_BASE64=${{VAULT_CLIENT_KEY_BASE64}}'
+{% endraw %}
+{% endhighlight %}
+
+For each step you define the following sections:
+
+* Metadata to describe the characteristics of the step
+* The description of its arguments
+* The implementation (i.e. what yaml gets inserted in the pipeline)
+
+For the metadata section note the following:
+
+* `isPublic` decides if this step is visible only to you and your team, or visible to all (in the marketplace)
+* The `name` of the step **must** be prefixed with your Codefresh account name. Steps created by the Codefresh team are on the root level of the hierarchy (without prefix). This is the same pattern that Dockerhub is using for images.
+* `stage` shows whether this step is ready for production or still incubating. This is just an indication to users. It doesn't affect the implementation of the step in any way
+* `icon`. Ideally you provide a transparent svg so that the icon is scalable. The icon for a step is used both in the marketplace as well as the pipeline view. You can also select a default background to be used. Alternatively, you can define jpg/png icons for large/medium/small sizes. We suggest the svg approach
+* The `version` property allows you to update your plugin and keep multiple variants of it in the marketplace
+* The `examples` section will be shown in the marketplace as documentation for your step
+
+For the argument section we follow the [JSON Schema](http://json-schema.org/learn/miscellaneous-examples.html). You can use the [Schema generator](https://jsonschema.net/) to easily create a schema. JSON schema is used for arguments (i.e. input parameters) as well as output parameters as we will see later on.
+
+The property `additionalProperties` defines how strict the plugin will be with its arguments. If you set it to `false` (which is usually what you want) the pipeline will fail if the plugin is given more arguments than it is expecting. If you set it to `true`, then the plugin will only use the arguments it understands and will ignore the rest.
+
+The final part is the step implementation. Here you can define exactly the yaml that this step will insert in the pipeline. You can use any of the built-in steps in Codefresh and even add multiple steps.
+
+>Note that currently you cannot nest custom pipeline steps. We are aware of this limitation and are actively working on it, but at the time of writing you cannot use a typed step inside another typed step.
+
+Once you are done with your step, use the Codefresh CLI to upload it to the marketplace. If you want the step to be available only to you and your team, make sure that the property `isPublic` is false (and then it will not be shown in the marketplace).
+ +{% highlight bash %} +codefresh create step-type -f my-custom-step.yml +{% endhighlight %} + +If you make further changes to your step you can update it: + +{% highlight bash %} +codefresh replace step-type -f my-custom-step.yml +{% endhighlight %} + +If you want to remove your step from the marketplace, you can delete it completely: + +{% highlight bash %} +codefresh delete step-type kostis-codefresh/sample +{% endhighlight %} + +### Versioning of typed steps + +The `version` property under `metadata` in the plugin manifest allows you to publish multiple releases of the same plugin in the marketplace. Codefresh will keep all previous plugins and users are free to choose which version they want. + +To create a new version of your plugin: + +1. Update the `version` property under `metadata` in your custom YAML. +2. Run: + +{% highlight bash %} +codefresh create step-type -f custom-plugin.yaml +{% endhighlight %} + +You will now be able to see the new versions of your plugin in the step marketplace drop-down: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/step-versions.png" +url="/images/pipeline/codefresh-yaml/steps/step-versions.png" +alt="Different step versions" +caption="Different step versions" +max-width="60%" +%} + +You can also use the Codefresh CLI to list all version: + +{% highlight bash %} +codefresh get step-types kostis-codefresh/sample --versions +{% endhighlight %} + +To delete a specific version, use: + +{% highlight bash %} +codefresh delete step-type 'account/plugin:' +{% endhighlight %} + +Note that Codefresh step versions function like Docker tags in the sense that they are *mutable*. You can overwrite an existing plugin version with a new plugin manifest by using the `codefresh replace step-type` command. + +If users do not define a version once they use the plugin, the latest one (according to [semantic versioning](https://semver.org/)) will be used. Alternatively they can specify the exact version they need (even different versions within the same pipeline.) + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_step_1: + title: Running old custom step + type: kostis-codefresh/sample:1.2.1 + my_step_2: + title: Running new custom step + type: kostis-codefresh/sample:1.3.5 +{% endraw %} +{% endhighlight %} + +### Example with input parameters + +Let's create a very simple step called *node-version*. This step will read the application version from a NodeJS project and expose it as an environment variable. This way we can use the application version later in the pipeline (for example to tag a docker image). + +Here is the respective [step yaml](https://github.com/kostis-codefresh/step-examples/blob/master/node-version-plugin/read-app-version.yml). + + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/node-version + isPublic: false + description: >- + The plugin exports as an environment variable the application version from package.json + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - utility + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/nodejs-icon.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'my-github-user/my-github-repo' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/node-version + arguments: + PACKAGE_JSON_FOLDER: './my-github-repo' + print_app_version: + title: Printing app version + image: alpine + commands: + - echo $APP_VERSION + latest: true + version: 1.0.0 +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "PACKAGE_JSON_FOLDER" + ], + "properties": { + "PACKAGE_JSON_FOLDER": { + "type": "string", + "description": "folder where package.json is located" + } + } + } + steps: + main: + name: kostis-codefresh/node-version + image: node + commands: + - cd $WORK_DIR + - pwd + - APP_VERSION=$(node -p -e "require('./package.json').version") + - echo $APP_VERSION + - export APP_VERSION + - cf_export APP_VERSION + environment: + - 'WORK_DIR=${{PACKAGE_JSON_FOLDER}}' +{% endraw %} +{% endhighlight %} + +If you look at the `spec` section you will see that the plugin expects a single parameter called `PACKAGE_JSON_FOLDER`. This will +be passed by the plugin user to specify the folder that contains the `package.json` file. This way this plugin can be used for multiple applications. For example, the plugin user might check out 3 different Node.js projects and use the plugin to read the versions of all of them. + +The plugin implementation is specified in the `steps` sections. We use the standard [Node Docker image](https://hub.docker.com/_/node) to read the version from the `package.json` file. Notice how we convert the plugin argument to an environment variable called `WORK_DIR` + +By default all plugins start with the Codefresh volume at `/codefresh/volume` as a working folder. So with the `cd` command we enter the project folder (which we assume was checked out in a previous pipeline step). Once the version is read it is made available to all the other pipeline steps with the [cf_export command]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command). + +We now insert our plugin in the marketplace with the following command: + +{% highlight bash %} +codefresh create step-type -f read-app-version.yml +{% endhighlight %} + +The step is now ready to be used by anybody. + +An example user pipeline is shown at [codefresh.yml](https://github.com/kostis-codefresh/step-examples/blob/master/node-version-plugin/codefresh.yml) + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/example_nodejs_postgres' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/node-version + arguments: + PACKAGE_JSON_FOLDER: './example_nodejs_postgres' + print_app_version: + title: Printing app version + image: alpine + commands: + - echo $APP_VERSION +{% endraw %} +{% endhighlight %} + +This is a very simple pipeline that checks out a NodeJS project and uses our plugin. Notice how we pass as argument the required parameter `example_nodejs_postgres` to tell the plugin where our `package.json` file is located. Once the plugin runs the application version is available as an environment variable that we can use in other steps as `APP_VERSION`. 
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/input-parameters.png"
+url="/images/pipeline/codefresh-yaml/steps/input-parameters.png"
+alt="Step input parameters"
+caption="Step input parameters"
+max-width="60%"
+%}
+
+The input parameter is also shown as required in the marketplace.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png"
+url="/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png"
+alt="Input parameters on marketplace"
+caption="Input parameters on marketplace"
+max-width="40%"
+%}
+
+This is a trivial example, but it still shows how Codefresh pipelines can be declarative while actually doing a lot of imperative actions behind the scenes.
+
+### Example with output parameters
+
+In the previous example our plugin had an output parameter (`APP_VERSION`) that is created by the custom step and given back to the user. Even though creating an output parameter using only `cf_export` will work just fine at the technical level, it is best to formally define output parameters in the step definition.
+
+If you define output parameters in the step definition, their names will appear in the marketplace and users will have an easier time understanding what your step produces. You will be able to define complete JSON objects in addition to output strings. Formal output parameters are also available under a special notation (`step.outputs`) that we will explain in this example.
+
+We suggest you always formalize your output parameters in your step definition, especially when your step has a large number of output parameters.
+
+The same [JSON Schema](http://json-schema.org/learn/miscellaneous-examples.html) is also used for output parameters as with input ones.
+Here is a [very simple example](https://github.com/kostis-codefresh/step-examples/blob/master/output-parameters/output-parameters-sample.yml) that shows the different types of output parameters you can have.
+ + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/output-parameters-example + isPublic: false + description: >- + The plugin shows how you can export output parameters + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - utility + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/bash-1.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + dummy_parameters: + title: Creating output parameters + type: kostis-codefresh/output-parameters-example + print_my_variables: + title: Printing dummy content + image: alpine + commands: + - echo $MY_NUMBER + - echo $MY_CITY + - echo $MY_FAVORITE_FOOD + - echo ${{steps.dummy_parameters.output.MY_NUMBER}} + - echo ${{steps.dummy_parameters.output.MY_CITY}} + - echo ${{steps.dummy_parameters.output.MY_FAVORITE_FOOD}} + latest: true + version: 1.0.0 +spec: + returns: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": true, + "patterns": [], + "required": [ + "MY_NUMBER", + "MY_CITY", + "MY_FAVORITE_FOOD" + ] + , + "properties": { + "MY_NUMBER": { + "type": "number", + "description": "an example variable that holds a number" + }, + "MY_CITY": { + "type": "object", + "description": "an example variable that holds a JSON object", + "required": ["city_name", "country", "population"], + "properties": { + "city_name": {"type": "string"}, + "country": {"type": "string"}, + "population": {"type": "integer"} + } + }, + "MY_FAVORITE_FOOD": { + "description": "an example variable that holds a number", + "type": "array", + "maxItems": 3, + "items": { + "type": "string" + } + } + } + } + steps: + main: + name: kostis-codefresh/output-parameters-example + image: alpine + commands: + - cf_export MY_NUMBER=42 + - cf_export MY_CITY='{"city_name":"San Francisco", "country":"usa","population":884363}' + - cf_export MY_FAVORITE_FOOD='["pizza", "ramen", "hot dogs"]' + +{% endraw %} +{% endhighlight %} + +This plugin exports 3 output parameters + +* `MY_NUMBER` - a single number +* `MY_CITY` - an object with fields `city_name`, `country`, `population` +* `MY_FAVORITE_FOOD` - an array. + +Output parameters are defined in the `returns` block. +The output parameters of the step are now shown in the marketplace so consumers of this plugin know what to expect when they use it. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png" +url="/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png" +alt="Output parameters on marketplace" +caption="Output parameters on marketplace" +max-width="40%" +%} + +As can be seen from the `examples` block, when you have formal output parameters you can also access them by mentioning the specific steps in your pipeline that creates it. The following are two equal ways to use an output parameter in your pipeline: + +``` +{% raw %} +echo $MY_NUMBER +echo ${{steps.dummy_parameters.output.MY_NUMBER}} +{% endraw %} +``` + +In the case of output parameters that are objects you can also use `jq` to get specific properties like this: + +``` +{% raw %} +echo ${{steps.dummy_parameters.output.MY_CITY}} | jq '.city_name' +{% endraw %} +``` + +This will print "San Francisco". 
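+
+Similarly (assuming the `MY_FAVORITE_FOOD` array from the example above), you can pick an individual item of an array output with `jq`:
+
+```
+{% raw %}
+echo ${{steps.dummy_parameters.output.MY_FAVORITE_FOOD}} | jq '.[0]'
+{% endraw %}
+```
+
+This would print "pizza".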
+ + +### Example with input/output parameters + +Let's take everything we learned from the previous examples and create a custom step that has + +1. A custom Docker image +1. Formal input parameters +1. Format output parameters + +In this simple example we will create a custom step that reads the Maven coordinates from a `pom.xml` file. Unlike `package.json`, a Maven file has 3 characteristics (group, artifact name and version). First we create a [very simple executable](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/mvncoords.go) that reads a Maven file and gives us these coordinates in JSON format. + +{% highlight shell %} +{% raw %} +mvncoords -f pom.xml +{"groupId":"com.example.codefresh","artifactId":"my-java-app","version":"3.0.2"} +{% endraw %} +{% endhighlight %} + +Next, we package this executable in a [Dockerfile](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/Dockerfile). + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.12-alpine AS build_base + +WORKDIR /tmp/ + +COPY . . + +# Unit tests +RUN go test -v + +# Build the Go app +RUN go build -o ./out/mvncoords . + +# Start fresh from a smaller image +FROM alpine:3.9 + +COPY --from=build_base /tmp/out/mvncoords /usr/local/bin/mvncoords + +CMD ["mvncoords"] +{% endraw %} +{% endhighlight %} + +We now have a custom Docker image that contains our executable. If we want other people to use it, we need to push it to Dockerhub. You can do this manually from your workstation using `docker login` and `docker push` commands, but it is much better to automate this with a Codefresh pipeline. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/create-plugin-image.png" +url="/images/pipeline/codefresh-yaml/steps/create-plugin-image.png" +alt="Building a public Docker image" +caption="Building a public Docker image" +max-width="60%" +%} + +This [pipeline](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/codefresh.yml) checks out the Dockerfile plus source code, builds the docker image and then pushes it to Dockerhub (so that the image is public). + +Finally we are ready to create our Codefresh plugin. Here is the [specification](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/read-maven-version.yml): + + + + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/mvn-version + isPublic: false + description: >- + The plugin exports as an environment variable the mvn coordinates from pom.xml + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - utility + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/java-4.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'my-github-user/my-github-repo' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/mvn-version + arguments: + POM_XML_FOLDER: './my-github-repo' + print_app_version: + title: Printing app coordinates + image: alpine + commands: + - echo $MVN_COORDS + - echo ${{steps.read_app_version.output.MVN_COORDS}} + latest: true + version: 1.0.0 +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "POM_XML_FOLDER" + ], + "properties": { + "POM_XML_FOLDER": { + "type": "string", + "description": "folder where pom.xml is located" + } + } + } + returns: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": true, + "patterns": [], + "required": [ + "MVN_COORDS" + ], + "properties": { + "MVN_COORDS": { + "type": "object", + "required": ["groupId", "artifactId", "version"], + "properties": { + "groupId": {"type": "string"}, + "artifactId": {"type": "string"}, + "version": {"type": "string"} + } + } + } + } + steps: + main: + name: kostis-codefresh/mvn-version + image: kkapelon/maven-version-extract + commands: + - cd $WORK_DIR + - MVN_COORDS=$(mvncoords -json) + - export MVN_COORDS + - cf_export MVN_COORDS + environment: + - 'WORK_DIR=${{POM_XML_FOLDER}}' +{% endraw %} +{% endhighlight %} + +We place this plugin into the marketplace with + +``` +codefresh create step-type -f read-maven-version.yml +``` + +If you look at the plugin entry in the marketplace you will see both input (the folder of the pom.xml) and output parameters (mvn coordinates) defined: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/plugin-parameters.png" +url="/images/pipeline/codefresh-yaml/steps/plugin-parameters.png" +alt="Input and output parameters" +caption="Input and output parameters" +max-width="60%" +%} + +The plugin is now ready to be used in a pipeline: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/plugin-usage.png" +url="/images/pipeline/codefresh-yaml/steps/plugin-usage.png" +alt="Plugin usage" +caption="Plugin usage" +max-width="60%" +%} + +If you look at the [pipeline definition](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/codefresh-example.yml) you will see how we pass arguments in the plugin and get its output with the `steps.output` syntax. + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/mvn-version + arguments: + POM_XML_FOLDER: './spring-boot-2-sample-app' + print_app_version: + title: Printing app version + image: alpine + commands: + - echo $MVN_COORDS + - echo ${{steps.read_app_version.output.MVN_COORDS}} +{% endraw %} +{% endhighlight %} + +This was a trivial example, but it clearly demonstrates how a custom step communicates with the rest of the pipeline by getting input from the previous steps and preparing output for the steps that follow it. 
+ +### Exporting parameters manually inside a plugin + +Normally, in a pipeline you can either use the [cf_export]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) command or write directly to the [/codefresh/volume/env_vars_to_export]({{site.baseurl}}/docs/pipelines/variables/#directly-writing-to-the-file) file. + +However, inside a plugin you can also use the `/meta/env_vars_to_export` file that has the same semantics, but is used for exporting variables in the same scope as the plugin only. + +The rules for using `/meta/env_vars_to_export` are: +- When the step-type (plugin) does not define the `return` schema, all the output variables from substeps will be projected and exported as the root step (they may override each other). +- When `return` schema is defined, only the variables that matched the definition will be exported as root step. + +`plugin.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: /my-step + ... +spec: + arguments: |- + { + ... + } + returns: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": true, + "patterns": [], + "required": [ + "ROOT_VAR" + ] + , + "properties": { + "ROOT_VAR": { + "type": "string", + "description": "an example variable" + } + } + } + steps: + export_my_variable: + title: "Exporting custom variable" + image: alpine + commands: + - echo PLUGIN_VAR=Alice >> /meta/env_vars_to_export + - echo ROOT_VAR=Bob >> /meta/env_vars_to_export + read_my_variable: + title: "Reading custom variable" + image: alpine + commands: + - source /meta/env_vars_to_export + - echo $PLUGIN_VAR #Alice + - echo $ROOT_VAR #Bob +{% endraw %} +{% endhighlight %} + + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + plugin: + type: /my-step + echo: + image: alpine + commands: + - echo $PLUGIN_VAR #empty + - echo $ROOT_VAR #Bob +{% endraw %} +{% endhighlight %} + +You can still use `cf_export` command inside the plugin as well (as shown in the previous examples). + + +### Example with step templating + +As an advanced technique, Codefresh allows you to define a custom step using templating instead of fixed YAML. We support templates inside the `spec:` block of a plugin definition by taking advantage of the [Gomplate](https://github.com/hairyhenderson/gomplate) library that offers additional templating functions on top of vanilla [Go templates](https://golang.org/pkg/text/template/). + +> Note: Gomplate Data functions will not work since Codefresh does not pass the Data object to gomplate functions. + +As a simple example lets say we want to create a single step that checks out any number of git repositories. Of course you could just copy-paste the git clone step multiple times in a single pipeline. To make things easier we will create a single step that takes an array of git repositories and checks them out on its own: + +{% highlight yaml %} +{% raw %} +checkout_many_projects: + title: Checking out my Git projects + type: kostis-codefresh/multi-git-clone + arguments: + GIT_PROJECTS: + - 'codefresh-contrib/ruby-on-rails-sample-app' + - 'kubernetes/sample-apiserver' + - 'kostis-codefresh/nestjs-example' + - 'spring-projects/spring-petclinic' +{% endraw %} +{% endhighlight %} + +The GitHub projects are passed as an array, so if we want to check out an additional project, we simply add items to that array. 
+ +Here is the [step specification](https://github.com/kostis-codefresh/step-examples/blob/master/multi-clone/multi-clone-step.yml): + + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/multi-git-clone + isPublic: false + description: >- + This pipeline plugin shows templating of custom steps + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - git + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/git.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + checkout_many_projects: + title: Checking out my Git projects + type: kostis-codefresh/multi-git-clone + arguments: + GIT_REVISION: 'master' + GIT_PROVIDER: 'github' + GIT_PROJECTS: + - 'codefresh-contrib/ruby-on-rails-sample-app' + - 'kubernetes/sample-apiserver' + - 'kostis-codefresh/nestjs-example' + - 'spring-projects/spring-petclinic' + latest: true + version: 1.0.0 +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "GIT_PROJECTS", + "GIT_REVISION", + "GIT_PROVIDER" + ], + "properties": { + "GIT_REVISION": { + "type": "string", + "description": "branch or tag or revision to checkout (same for all projects)" + }, + "GIT_PROVIDER": { + "type": "string", + "description": "Name of git provider to use from Codefresh integrations screen" + }, + "GIT_PROJECTS": { + "description": "A list/array of git projects to checkout", + "type": "array", + "maxItems": 10, + "items": { + "type": "string" + } + } + } + } + delimiters: + left: '[[' + right: ']]' + stepsTemplate: |- + print_info_message: + name: kostis-codefresh/multi-git-clone + title: Info message + image: alpine + commands: + - echo "Checking out [[ len .Arguments.GIT_PROJECTS ]] git projects" + [[ range $index, $git_project :=.Arguments.GIT_PROJECTS ]] + clone_project_[[$index]]: + title: Cloning [[$git_project]] ... + type: git-clone + repo: '[[$git_project]]' + revision: [[$.Arguments.GIT_REVISION]] + git: [[$.Arguments.GIT_PROVIDER]] + [[end]] +{% endraw %} +{% endhighlight %} + +There are two important points here: + +1. Instead of using a `steps:` block, we instead define a block called `stepsTemplate:`. This block name instructs Codefresh that we will use templates +1. Because the Codefresh runtime is already using the double curly braces for variables mentioned as {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %}, we instead define templates with the characters {% raw %}`[[]]`{% endraw %}. You can see the definitions for these characters inside the `delimiters:` block. You are free to use any other replacement characters of your choosing. + +In the `stepsTemplate` block we use Golang template keywoards such as `range`, `len` and template variables (such as `git_project`). You can use all the capabilities of Go templates (e.g. `if`, `range`, `with`) as well as the extra methods of [gomplate](https://docs.gomplate.ca/) such as math and net functions. 
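+
+To make the templating more concrete, with a two-item `GIT_PROJECTS` array the `stepsTemplate` above would expand to roughly the following plain steps (a sketch of the generated YAML, shown only for illustration -- you never write this expanded form yourself):
+
+{% highlight yaml %}
+{% raw %}
+print_info_message:
+  name: kostis-codefresh/multi-git-clone
+  title: Info message
+  image: alpine
+  commands:
+    - echo "Checking out 2 git projects"
+clone_project_0:
+  title: Cloning codefresh-contrib/ruby-on-rails-sample-app ...
+  type: git-clone
+  repo: 'codefresh-contrib/ruby-on-rails-sample-app'
+  revision: master
+  git: github
+clone_project_1:
+  title: Cloning kubernetes/sample-apiserver ...
+  type: git-clone
+  repo: 'kubernetes/sample-apiserver'
+  revision: master
+  git: github
+{% endraw %}
+{% endhighlight %}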
+ +Creating the [marketplace entry](https://codefresh.io/steps/step/kostis-codefresh%2Fmulti-git-clone) for a step with templates is exactly the same as any other step: + +``` +codefresh create step-type -f multi-clone-step.yml +``` + +You can then use the step in [any pipeline](https://github.com/kostis-codefresh/step-examples/blob/master/multi-clone/codefresh.yml) and pass the arguments that will fill the template: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + checkout_many_projects: + title: Checking out my Git projects + type: kostis-codefresh/multi-git-clone + arguments: + GIT_REVISION: 'master' + GIT_PROVIDER: 'github' + GIT_PROJECTS: + - 'codefresh-contrib/ruby-on-rails-sample-app' + - 'kubernetes/sample-apiserver' + - 'kostis-codefresh/nestjs-example' + - 'spring-projects/spring-petclinic' + print_my_workspace: + title: Show projects + image: alpine + commands: + - ls -l + - pwd +{% endraw %} +{% endhighlight %} + +We have also added two extra parameters, one for the git revision and one for the [git provider]({{site.baseurl}}/docs/integrations/git-providers/) that will be used during checkout. + +The end result is that with a single step you can checkout many projects. Checking out an additional project is as simple as adding a new entry in the `GIT_PROJECTS` array. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/multi-checkout.png" +url="/images/pipeline/codefresh-yaml/steps/multi-checkout.png" +alt="Checking out multiple Git repositories in a single step" +caption="Checking out multiple Git repositories in a single step" +max-width="60%" +%} + +This was a contrived example to demonstrate how you can use templates in the Codefresh plugin specification. Note that using templates in Codefresh steps is an advanced technique and should be used sparingly. + +### Limitations of custom plugins + +[Parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) are not supported inside custom steps. + +Within a custom step, the [fail_fast field]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#execution-flow) does not work. Use the `failFast` field instead. + +Custom steps are not compatible with [service containers]({{site.baseurl}}/docs/pipelines/service-containers/). +More specifically: + + * If you have a [service container in the pipeline-level]({{site.baseurl}}/docs/pipelines/service-containers/#running-services-for-the-duration-of-the-pipeline), steps inside the custom plugin will not be able to access it + * If you try to attach a service container to a custom plugin, the plugin will fail when executed + * If you try to define a custom plugin where a step inside it has a service container attached, the custom plugin will fail when executed + +## Related articles +[Introduction to Pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) +[Build step]({{site.baseurl}}/docs/pipelines/steps/build/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) + diff --git a/_docs/pipelines/steps/approval.md b/_docs/pipelines/steps/approval.md new file mode 100644 index 000000000..a82a83149 --- /dev/null +++ b/_docs/pipelines/steps/approval.md @@ -0,0 +1,348 @@ +--- +title: "Approval" +description: "How to Pause Pipelines for Manual Approval" +group: codefresh-yaml +sub_group: steps +toc: true +--- + +The approval step allows you to pause a pipeline and wait for human intervention before going on. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/approval-waiting.png" +url="/images/pipeline/codefresh-yaml/approval/approval-waiting.png" +alt="Manual Approval step" +caption="Manual Approval step" +max-width="80%" +%} + +Some example scenarios for using the approval step: + +* Pause before deploying to production +* Pause before destroying an environment +* Pause for some manual smoke tests or metric collection + +## Usage + + `YAML` +{% highlight yaml %} +{% raw %} +step_name: + type: pending-approval + title: Step Title + description: Step description + timeout: + duration: 2 + finalState: approved + timeUnit: minutes + when: + branch: + only: [ master ] + +{% endraw %} +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `timeout` | Defines an automatic approval/rejection if a specified amount of time has passed. The `duration` field is hours. By default it is set to 168 (i.e, 7 days). The `finalState` field defines what will happen after the duration time has elapsed. Possible values are `approved`/`denied`/`terminated` | Optional | +| `timeUnit` | This field defines possible options of `minutes`, or `hours`. If the field is not set, the default is `hours` | Optional +| `fail_fast` | If set to false, the pipeline will continue even when the step is rejected | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `when` | Define a set of conditions that need to be satisfied in order to execute this step. You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) article. | Optional | + + +## Pausing the Pipeline + +Once the pipeline reaches an approval step it will stop. At this point it **does not** consume any resources. +In the Codefresh UI you will see the *Approve/Reject* buttons. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/build-waiting.png" +url="/images/pipeline/codefresh-yaml/approval/build-waiting.png" +alt="Build waiting for input" +caption="Build waiting for input" +max-width="80%" +%} + +Once you click any of them the pipeline will continue. Further steps in the pipeline can be enabled/disabled +according to the approval result. + +## Automatic Approvals/Rejections + +By default, a pipeline that contains an approval step will pause for 7 days (168 hours) onces it reaches that step. If you want some automatic action to happen after a specified time period you can define it in advance with the `timeout` property: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + waitForInputBeforeProduction: + type: pending-approval + title: Deploy to Production? + timeout: + duration: 2 + finalState: denied +{% endraw %} +{% endhighlight %} + +This pipeline will wait for approval for two hours. If somebody approves it, it will continue. If nothing happens after two hours +the approval step will be automatically rejected. + +## Approval Restrictions + +By default, any Codefresh user can approve any pipeline that is paused at the approval state. 
+ If you want to restrict +the approval action to a subset of people, you can use the [Access Control facilities]({{site.baseurl}}/docs/enterprise/access-control/) that Codefresh provides. + +This is a two-step process. First you need to tag your pipeline with one or more tags (tag names are arbitrary). You can edit tags in the pipeline settings screen. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/pipeline-tag.png" +url="/images/pipeline/codefresh-yaml/approval/pipeline-tag.png" +alt="Marking a pipeline with tags" +caption="Marking a pipeline with tags" +max-width="40%" +%} + +Once you have tagged your pipelines you can create one or more access rules that restrict approval to specific teams within your organization. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/approval-rule.png" +url="/images/pipeline/codefresh-yaml/approval/approval-rule.png" +alt="Rules for approvals" +caption="Rules for approvals" +max-width="80%" +%} + + +For more details on access control and users, see also the [access control page]({{site.baseurl}}/docs/administration/access-control/). + +## Keeping the Shared Volume after an Approval + +As soon as a pipeline starts waiting for an approval, all contents of the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) are lost. Once the pipeline continues running, all files that were created manually inside the volume are no longer available. + +If you want to keep any temporary files that were there before the approval, you need to enable the respective policy in your [pipeline settings]({{site.baseurl}}/docs/pipelines/pipelines/#policies). + +You can either set this option differently per pipeline, or globally in your account at your [account settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings). + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/keep-volume.png" +url="/images/codefresh-yaml/approval/keep-volume.png" +alt="Preserve Codefresh volume after an approval" +caption="Preserve Codefresh volume after an approval" +max-width="90%" +%} + +>Notice that if you do decide to keep the volume after an approval, the pipeline will still count as "running" against your pricing plan (if you use the SaaS version of Codefresh). If you don't keep the volume, the pipeline is stopped/paused while it is waiting for approval and doesn't count against your pricing plan. We advise you to keep the volume only for pipelines that really need this capability. + +>Notice also that if you use the [Codefresh Runner]({{site.baseurl}}/docs/reference/behind-the-firewall/) and your [Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) is set up with local volumes, the volume will only be present if the dind pod +is scheduled on the same node once the pipeline resumes. Otherwise the volume will not be reused. + +## Controlling the Rejection Behavior + +By default, if you reject a pipeline, it stops right away and is marked as failed. All subsequent steps after the approval one will not run at all. + +You might want to continue running the pipeline even when it is rejected by adding the `fail_fast` property in the approval step: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + waitForInputBeforeProduction: + fail_fast: false + type: pending-approval + title: Deploy to Production? 
+{% endraw %} +{% endhighlight %} + +In this case you can also read the approval result and make the pipeline work differently according to each choice (demonstrated in the following section). + + +## Getting the Approval Result + +As also explained in [step dependencies]({{site.baseurl}}/docs/pipelines/advanced-workflows/#custom-steps-dependencies) all steps in the Codefresh pipeline belong to a global object +called `steps` (indexed by name). You can read the `result` property for an approval step to see if it was approved or rejected. + +Here is an example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + askForPermission: + type: pending-approval + title: Destroy QA environment? + destroyQaEnvNow: + image: alpine:3.8 + title: Destroying env + commands: + - echo "Destroy command running" + when: + steps: + - name: askForPermission + on: + - approved +{% endraw %} +{% endhighlight %} + +In this example the second step that is destroying an environment will only run if the user +approves the first step. In case of rejection the second step will be skipped. + +You can follow the same pattern for running steps when an approval step was rejected. +Here is a full example with both cases. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- prepare +- yesPleaseDo +- noDont + +steps: + step_1: + image: alpine:3.8 + title: building chart + stage: prepare + commands: + - echo "prepare" + deployToProdNow: + fail_fast: false + type: pending-approval + title: Should we deploy to prod + stage: prepare + step_2: + image: alpine:3.8 + title: prepare environment + stage: yesPleaseDo + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - approved + step_3: + image: alpine:3.8 + title: deploy to production + stage: yesPleaseDo + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - approved + step_4: + image: alpine:3.8 + title: prepare environment + stage: noDont + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - denied + step_5: + image: alpine:3.8 + title: deploy to staging + stage: noDont + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - denied +{% endraw %} +{% endhighlight %} + +Here is the pipeline state after a rejection: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png" +url="/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png" +alt="Rejecting a pipeline" +caption="Rejecting a pipeline" +max-width="80%" +%} + +>Note that we have added the `fail_fast` property in the approval step because we want the pipeline to continue even when the step is rejected. + + +You can see that only two steps were ignored. If you rerun the pipeline and approve +it, the other two steps will be ignored. + +## Define Concurrency Limits + +Codefresh has the ability to limit the amount of running builds for a specific pipeline with several concurrency policies in the pipeline settings. You can choose if a build that is in a pending approval state will count against the concurrency limits or not. + +As an example let's say that the concurrency limit for a specific pipeline is set to 2. Currently there is one active/running build and a second build that is pending approval. + +1. If the pipeline settings define that builds in pending approval **count** against concurrency, then if you launch a third build it will wait until one of the first two has finished +1. 
+ If the pipeline settings define that builds in pending approval **do not** count against concurrency, then if you launch a third build it will execute right away. + +There isn't a correct or wrong way to set this option. It depends on your organization and whether you consider builds pending approval as "active" or not. + +You can either set this option [differently per pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#policies), or globally in your account at your [account settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings). + + +## Slack Integration + +If you also enable [Slack integration]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) in Codefresh, you will have the choice of approving/rejecting a pipeline +via a Slack channel. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/slack-approval.png" +url="/images/pipeline/codefresh-yaml/approval/slack-approval.png" +alt="Approval step in a slack channel" +caption="Approval step in a slack channel" +max-width="80%" +%} + +To enable this behavior, you need to activate it in the Slack settings page: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/slack-settings.png" +url="/images/pipeline/codefresh-yaml/approval/slack-settings.png" +alt="Slack settings" +caption="Slack settings" +max-width="50%" +%} + +Also, if you run a pipeline manually that includes an approval step, you should check +the "Report notification of pipeline execution" checkbox as explained in [Monitoring Pipelines]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/#monitoring-pipelines-outside-the-codefresh-ui). + + + +## Related articles +[Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/) +[Advanced Workflows ]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +[Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) + + diff --git a/_docs/pipelines/steps/build.md b/_docs/pipelines/steps/build.md new file mode 100644 index 000000000..a270af733 --- /dev/null +++ b/_docs/pipelines/steps/build.md @@ -0,0 +1,379 @@ +--- +title: "Build" +description: "Building Docker images in Codefresh pipelines" +group: pipelines +sub_group: steps +redirect_from: + - /docs/build-1/ + - /docs/codefresh-yaml/steps/build-1/ +toc: true +--- +Use Docker to build an image and store it in Codefresh. + +## Purpose of build steps + +In Codefresh, Docker containers are first-class citizens, +and special typed steps are offered for the most common Docker commands. Build steps are a secure replacement for `docker build` commands. + +Therefore, this command on your local workstation: + +``` +docker build . -t my-app-image:1.0.1 +``` + +becomes the following build step in Codefresh:
+ +```yaml +BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + tag: 1.0.1 +``` + +## Usage + + `YAML` +{% highlight yaml %} +step_name: + type: build + title: Step Title + description: Free text description + working_directory: {% raw %}${{clone_step_name}}{% endraw %} + dockerfile: path/to/Dockerfile + image_name: owner/new-image-name + tag: develop + build_arguments: + - key=value + target: stage1 + no_cache: false + no_cf_cache: false + tag_policy: original + fail_fast: false + metadata: + set: + - qa: pending + when: + condition: + all: + noDetectedSkipCI: "includes('{% raw %}${{CF_COMMIT_MESSAGE}}{% endraw %}', '[skip ci]') == false" + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `working_directory` | The directory in which the build command is executed. It can be an explicit path in the container's file system, or a variable that references another step.
            The default is {% raw %} `${{main_clone}}` {% endraw %}. This only changes the Docker build context and is unrelated to the `WORKDIR` inside the Dockerile | Default | +| `dockerfile` | The path to the `Dockerfile` from which the image is built. The default is `Dockerfile`. | Default | +| `image_name` | The name for the image you build. | Required | +| `region` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The names of the regions for which to perform cross-region replication. The names of the source region and the destination region name must be defined in separate steps. | Optional | +| `tag` | The tag that is assigned to the image you build.
            The default is the name of the branch or revision that is built. | Default | +| `tags` | Multiple tags under which to push the image. Use either this or `tag`. This is an array, so should be of the following style:
            {::nomarkdown}
            tags:
            - tag1
            - tag2
            - {% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}
            - tag4
            {:/}or
            {::nomarkdown}
            tags: [ 'tag1', 'tag2', '{% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}', 'tag4' ]
            {:/} | Optional | +| `registry` | The registry logical name of one of the inserted registries from the integration view.
            The default value will be your default registry [if you have more than one]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). | Optional | +| `registry_contexts` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +|`disable_push` | Do not push to any registry automatically. | Optional | +|`tag_policy` | Push the tag name without change or lowercase it automatically. By default `tag: MixedCase` will be pushed as `image_name:mixedcase`. Possible options are `original` and `lowercase`. Default is `lowercase` | Default | +| `no_cache` | Disable Docker engine cache for the build [more info](https://codefresh.io/docs/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) | Optional | +| `no_cf_cache` | Disable Codefresh build optimization for the build [more info](https://codefresh.io/docs/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) +| `build_arguments` | A set of [Docker build arguments](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables-build-arg) to pass to the build process. | Optional | +| `target` | target stage in a multistage build (build will run until this stage) | Optional | +| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default | +| `when` | Define a set of conditions that need to be satisfied in order to execute this step.
            You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `metadata` | Annotate the built image with [key-value metadata]({{site.baseurl}}/docs/docker-registries/metadata-annotations/). | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | +| `buildkit` | Set to `true` to enable [Buildkit]({{site.baseurl}}/docs/pipelines/steps/build/#buildkit-support) and all of its enhancements | Optional | + +**Exported resources:** +- Working Directory +- Image ID + +## Examples + +Build an image using a Dockerfile in the root project folder: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build +{% endhighlight %} + +Build an image using a different Dockerfile and a specific version tag + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + dockerfile: my-custom.Dockerfile + tag: 1.0.1 +{% endhighlight %} + +Build an image using a different Dockerfile and push multiple tags to the default registry. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + dockerfile: my-custom.Dockerfile + tags: + - latest + - ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}} + - v1.1 +{% endraw %} +{% endhighlight %} + +Build an image and automatically push to the [registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) with name `my-registry`. + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + dockerfile: my-custom.Dockerfile + tag: 1.0.1 + registry: my-registry +{% endhighlight %} + +Build two images in two different folders using [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/) as tags. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildNodeImage: + title: Building My Node app + type: build + image_name: my-department/my-team/my-node-image + dockerfile: Dockerfile + working_directory: ./project1 + tag: ${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}} + BuildGoImage: + title: Building My Go app + type: build + image_name: my-company/my-go-image + dockerfile: Dockerfile + working_directory: ./project2 + tag: ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}} +{% endraw %} +{% endhighlight %} + +It also possible to build Docker images in [parallel]({{site.baseurl}}/docs/pipelines/advanced-workflows/) for faster builds. 
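+
+As a rough sketch, wrapping two of the build steps from the previous example in a parallel block could look like this (assuming the same two project folders):
+
+`codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+steps:
+  build_images_in_parallel:
+    type: parallel
+    steps:
+      BuildNodeImage:
+        title: Building My Node app
+        type: build
+        image_name: my-department/my-team/my-node-image
+        working_directory: ./project1
+      BuildGoImage:
+        title: Building My Go app
+        type: build
+        image_name: my-company/my-go-image
+        working_directory: ./project2
+{% endhighlight %}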
+ +### Inline Dockerfile + +If your project does not already have a Dockerfile, you can also define one within the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: my-own-go-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: + content: |- + # --- + # Go Builder Image + FROM golang:1.8-alpine AS builder + # set build arguments: GitHub user and repository + ARG GH_USER + ARG GH_REPO + # Create and set working directory + RUN mkdir -p /go/src/github.com/$GH_USER/$GH_REPO + # copy file from builder image + COPY --from=builder /go/src/github.com/$GH_USER/$GH_REPO/dist/myapp + /usr/bin/myapp + CMD ["myapp", "--help"] +{% endraw %} +{% endhighlight %} + +Use this technique only as a last resort. It is better if the Dockerfile exists as an actual file in source control. + + +## Automatic pushing + +All images built successfully with the build step, will be automatically pushed to the default Docker registry in your account. This behavior is completely automatic and happens without any extra configuration on your part. If you want to disable this then add the `disable_push` property in your build step. + +>Notice that the [push step]({{site.baseurl}}/docs/pipelines/steps/push/) in Codefresh is optional and is only needed if you want to push to [external Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). + +{% + include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png" + url="/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png" + alt="Docker Images pushed automatically" + caption="Docker Images pushed automatically" + max-width="80%" +%} + +## Buildkit support + +Codefresh also allows you to use [buildkit](https://github.com/moby/buildkit) with all its [enhancements](https://docs.docker.com/develop/develop-images/build_enhancements/) and [experimental features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#experimental-syntaxes). + +Using buildkit you can get: + +* Improved build output logs +* Mounting of external secrets that will never be stored in the image +* Access to SSH keys and sockets from within the Dockerfile +* Use cache and bind-mounts at build time + +These capabilities are offered as extra arguments in the build step and using any of them will automatically enable buildkit. You can utilize the different mount-options for the Dockerfile instruction `RUN` as long as buildkit is enabled for your build step. Mounts of type [`cache`](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#example-cache-go-packages) work out of the box and are persisted between pipeline runs. + +The simplest way to use buildkit is by enabling it explicitly: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + buildkit: true +{% endhighlight %} + +Buildkit is also automatically enabled if you use any of its features such as the `progress` property: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + progress: tty +{% endhighlight %} + +Possible values for `progress` are `tty` and `plain`. 
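+
+Inside the Dockerfile itself, the cache and secret capabilities are consumed through BuildKit's `RUN --mount` flags. Here is a minimal sketch (the cache target is illustrative, and the `secret1` id corresponds to a secret declared in the build step as shown next):
+
+ `Dockerfile`
+{% highlight docker %}
+{% raw %}
+# syntax=docker/dockerfile:1
+FROM golang:1.12-alpine AS build
+WORKDIR /src
+COPY . .
+# Reuse the Go build cache between pipeline runs
+RUN --mount=type=cache,target=/root/.cache/go-build go build -o /out/app .
+# Read a secret at build time without storing it in any image layer
+RUN --mount=type=secret,id=secret1 cat /run/secrets/secret1 > /dev/null
+{% endraw %}
+{% endhighlight %}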
+ +For secrets you can either mention them in a single line: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + secrets: + - id=secret1,src=./my-secret-file1.txt + - id=secret2,src=./my-secret-file2.txt +{% endhighlight %} + +or multiple lines: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + secrets: + - id: secret1 + src: ./my-secret-file1.txt + - id: secret2 + src: ./my-secret-file2.txt +{% endhighlight %} + +For the SSH connection you can either use the default: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + ssh: default +{% endhighlight %} + + +or define different keys: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + ssh: + - github=~/.ssh/github_rsa + - bitbucket=~/.ssh/bitbucket_rsa +{% endhighlight %} + +You might want to use an environment variable to store and retrieve a ssh key. This can be achieved by converting you ssh key into a one-line string: +``` +tr '\n' ',' < /path/to/id_rsa +``` + +Copy the output and place it an [environment variable]({{site.baseurl}}/docs/pipelines/variables/#user-provided-variables). To make the SSH key availabe to the build step, you can write it to the codefresh volume: +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + SetupSshKeys: + title: Setting up ssh key + image: alpine:latest + commands: + - mkdir /codefresh/volume/keys + - echo "${SSH_KEY}" | tr ',' '\n' > /codefresh/volume/keys/github_rsa + + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + tag: latest + ssh: + - github=/codefresh/volume/keys/github_rsa +{% endraw %} +{% endhighlight %} + + +You can combine all options (`ssh`, `progress`, `secrets`) in a single build step if desired. + + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) diff --git a/_docs/pipelines/steps/composition.md b/_docs/pipelines/steps/composition.md new file mode 100644 index 000000000..40764e680 --- /dev/null +++ b/_docs/pipelines/steps/composition.md @@ -0,0 +1,434 @@ +--- +title: "Composition step" +description: "Run a Docker container with its dependencies inside a pipeline" +group: pipelines +sub_group: steps +redirect_from: + - /docs/composition-1/ + - /docs/codefresh-yaml/steps/composition-1/ +toc: true +--- +The composition step runs a Docker Composition as a means to execute finite commands in a more complex interaction of services. + +>Note that while composition steps are still supported, the recommended way to run integrations tests going forward is with [service containers]({{site.baseurl}}/docs/codefresh-yaml/service-containers/). + +## Motivation for Compositions + +The primary purpose of compositions is to run tests that require multiple services for their execution (often known as integration tests). + +The syntax offered by Codefresh closely follows the syntax for [Docker-compose](https://docs.docker.com/compose/overview/) files, but is technically not 100% the same (there are some important differences). 
+ However, if you are already familiar with Docker compose, you will be immediately familiar with Codefresh compositions. + +> Codefresh only understands Docker compose versions [2](https://docs.docker.com/compose/compose-file/compose-file-v2/) and [3](https://docs.docker.com/compose/compose-file/), but not point releases such as 2.1. + +The big difference between Codefresh compositions and Docker compose is that Codefresh distinguishes between two kinds of services: + +* Composition Services +* Composition Candidates + +**Composition Services** are helper services that are needed for the tests to run. These can be a database, a queue, a cache, or the backend docker image of your application -- these closely parallel the services that you might define in Docker compose. + +**Composition Candidates** are special services that will execute the tests. Codefresh will monitor their execution and the build will fail if they do not succeed. Composition candidates are almost always Docker images that contain unit/integration tests or other kinds of tests (e.g. performance tests). + +You need at least one composition service and one candidate for the composition step. + + +## Usage + +Here is an example of a composition step. Note that there is one composition service (PostgreSQL database, named `db`) and one composition candidate (tests executed with gulp). + +The most important part is the `command` line that executes the tests: `command: gulp integration_test`. If it fails, then the whole composition step will fail. + + + + `codefresh.yml` +{% highlight yaml %} +step_name: + type: composition + title: Step Title + description: Free text description + working_directory: {% raw %}${{a_clone_step}}{% endraw %} + composition: + version: '2' + services: + db: + image: postgres + composition_candidates: + test_service: + image: {% raw %}${{build_step}}{% endraw %} + command: gulp integration_test + working_dir: /app + environment: + - key=value + composition_variables: + - key=value + fail_fast: false + when: + condition: + all: + notFeatureBranch: 'match("{% raw %}${{CF_BRANCH}}{% endraw %}", "/FB-/", true) == false' + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Caveats on sharing a docker-compose.yml + +Although Codefresh's composition syntax closely follows the syntax used in `docker-compose.yml` files, it is not 100% the same. If you are using `docker-compose.yml` locally, you may experience some problems if you try to have Codefresh reference the file (by passing it to the `composition` field, e.g. `composition: docker-compose.yml`). + +One subtle difference is that Docker compose will interpolate environment variables that are quoted in single-braces, e.g. `${DATABASE_URL}`, whereas Codefresh interpolates variables that are quoted in double-braces, e.g. {% raw %}`${{DATABASE_URL}}`{% endraw %}. So if your `docker-compose.yml` file relies on the parsing of ENV variables, it may not be a good candidate for sharing with Codefresh.
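+
+For example, a service that reads a pipeline variable would need its interpolation syntax adjusted when it is moved into an inline Codefresh composition (a sketch, assuming `DATABASE_URL` is defined as a pipeline variable):
+
+{% highlight yaml %}
+{% raw %}
+# In a local docker-compose.yml (single braces, resolved by Docker compose)
+services:
+  app:
+    image: my-app
+    environment:
+      - DATABASE_URL=${DATABASE_URL}
+
+# In an inline Codefresh composition (double braces, resolved by Codefresh)
+composition:
+  version: '2'
+  services:
+    app:
+      image: my-app
+      environment:
+        - DATABASE_URL=${{DATABASE_URL}}
+{% endraw %}
+{% endhighlight %}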
+ +## Fields + +The following describes the fields available in a step of type `composition` + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `working_directory` | The directory in which to search for the composition file. It can be an explicit path in the container's file system, or a variable that references another step. The default is {% raw %}`${{main_clone}}`{% endraw %}. Note that this is completely different from `working_dir` which is on the service level. | Default | +| `composition` | The composition you want to run. This can be an inline YAML definition or a path to a composition file on the file system, e.g. `docker-compose.yml`, or the logical name of a composition stored in the Codefresh system. We support most features of [Docker compose version 2.0](https://docs.docker.com/compose/compose-file/compose-file-v2/) and [3.0](https://docs.docker.com/compose/compose-file/) | Required | +| `version` | Version for docker compose. Use `2` or `3` | Required | +| `composition_candidates` | The definition of the service to monitor. Each candidate has a **single** `command` parameter that decides what will be tested. | Required | +| `environment` (service level) | environment that will be accessible to the container | Optional | +| `working_dir` (service level) | defines the working directory that will be used in a service before running a command. By default it is defined by the docker image that is used by the service. | Optional | +| `registry_contexts` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +| `volumes` (service level) | Extra volumes for individual services. Used for transferring information between your steps. Explained in detail later in this page. | Optional | +| `composition_variables` | A set of environment variables to substitute in the composition. Notice that these variables are docker-compose variables and **NOT** environment variables | Optional | +| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default | +| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
            You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +## Composition versus Composition Candidates + +For Codefresh to determine if the step and operations were successfully executed, you must specify at least one `composition_candidate`. + +A `composition_candidate` is a single service component of the normal Docker composition that is monitored for a successful exit code and determines the outcome of the step. During runtime, the `composition_candidate` is merged into the specified `composition`and is monitored for successful execution. + +The critical part of each candidate is the `command` parameter. This takes [a single command](https://docs.docker.com/compose/compose-file/#command) that will +be executed inside the Docker container of the candidate and will decide if the whole composition is successful or not. Only one command is allowed (similar to Docker compose). If you wish to test multiple commands you need to connect them with `&&` like this. + +{% highlight yaml %} + composition_candidates: + my_unit_tests: + image: node + command: bash -c "sleep 60 && pwd && npm run test" +{% endhighlight %} + + +## Working directories in a composition + +By default, all services that take part in a composition will use as working directory the one defined by the respective image. If you want to change that, you need to use the `working_dir` parameter at the service level. + +Here is an example: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_composition: + type: composition + title: Sample composition + composition: + version: '2' + services: + my_service: + image: alpine + command: 'pwd' + working_dir: /tmp + composition_candidates: + my_test_service: + image: python + working_dir: /root + command: 'pwd' +{% endhighlight %} + +If you run this composition, you will see in the logs that the alpine image will use `/tmp` as a working directory and the python one will use `/root` + +``` +my_service_1 | /tmp +my_test_service_1 | /root +``` + +## Composition networking + +The networking in Codefresh compositions works just like normal Docker-compose. Each service is assigned a hostname that matches +its name and is accessible by other services. + +Here is an example + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + build_step: + type: build + image_name: my-node-app + dockerfile: Dockerfile + tag: ${{CF_BRANCH}} + my_db_tests: + type: composition + composition: + version: '2' + services: + db: + image: mysql:latest + ports: + - 3306 + environment: + MYSQL_ROOT_PASSWORD: admin + MYSQL_USER: my_user + MYSQL_PASSWORD: admin + MYSQL_DATABASE: nodejs + composition_candidates: + test: + image: ${{build_step}} + links: + - db + command: bash -c 'sleep 30 && MYSQL_ROOT_PASSWORD=admin MYSQL_USER=my_user MYSQL_HOST=db MYSQL_PASSWORD=admin MYSQL_DATABASE=nodejs npm test' +{% endraw %} +{% endhighlight %} + +In this composition the MySql instance will be available at host `db:3306` accessible from the node image. 
When the node tests run, they will be pointed to that host and port combination to access it. + +Notice also that like docker compose the order that the services are launched is not guaranteed. A quick way to solve this issue +is with a sleep statement like shown above. This will make sure that the database is truly up before the tests run. + +A better approach would be to use solutions such as [wait-for-it](https://github.com/vishnubob/wait-for-it) which are much more robust. Here is an example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + build_image: + type: build + description: Building the image... + image_name: my-spring-boot-app + tag: ${{CF_BRANCH_TAG_NORMALIZED}} + build_image_with_tests: + type: build + description: Building the Test image... + image_name: maven-integration-tests + dockerfile: Dockerfile.testing + integration_tests: + type: composition + title: Launching QA environment + description: Temporary test environment + composition: + version: '2' + services: + app: + image: ${{build_image}} + ports: + - 8080 + composition_candidates: + test_service: + image: ${{build_image_with_tests}} + links: + - app + command: bash -c '/usr/bin/wait-for-it.sh -t 20 app:8080 -- mvn verify -Dserver.host=app' +{% endraw %} +{% endhighlight %} + +In this composition a Java application is launched at `app:8080` and then a second image is used for integration tests that target that URL (passed as a parameter to Maven). + +The `wait-for-it.sh` script will make sure that the Java application is truly up before the tests are started. Notice that in the example above the script is included in the testing image (created by `Dockerfile.testing`) + +## Using public Docker images in a composition + +It is important to notice that Docker images used in a composition (both as services and candidates) will be looked from your connected registries first before looking at Dockerhub: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + my_composition: + type: composition + title: Sample composition + composition: + version: '2' + services: + my_service: + image: mysql + ports: + - 3306 + composition_candidates: + my_test_service: + image: alpine + working_dir: /root + command: 'pwd' + +{% endraw %} +{% endhighlight %} + +In the example above if you already have two images in your private registries named `mysql` and `alpine`, then *THEY* will be used instead of the respective images in Dockerhub. + +You can see which images are used in the logs of the builds: + +``` +Running composition step: Sample composition +Pulling kostisazureregistry.azurecr.io/mysql@sha256:1ee5515fed3dae4f13d0f7320e600a38522fd7e510b225e68421e1f90 +Pulling kostisazureregistry.azurecr.io/alpine@sha256:eddb7866364ec96861a7eb83ae7977b3efb98e8e978c1c9277262d327 +``` + + +## Accessing your project folder from a composition + +By default, the services of a composition run in a completely isolated manner. 
There are several scenarios however where you wish to access your Git files such as: + +* Using test data that is available in the project folder +* Preloading a database with a data script found in Git +* Running integration tests and then using their [results for reporting]({{site.baseurl}}/docs/testing/test-reports/) + +The Codefresh [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) is automatically mounted in [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) but **NOT** in compositions. You have to mount it yourself if you use that functionality. + +Here is an example where the shared volume is mounted in a composition -- {% raw %}`'${{CF_VOLUME_NAME}}:${{CF_VOLUME_PATH}}'`{% endraw %} is listed under `volumes`: + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + create_test_data_step: + title: Creating dummy data + image: alpine + commands: + - echo "Writing in shared volume" > /codefresh/volume/sample_text.txt + my_sample_composition: + type: composition + title: Composition with volume + composition: + version: '2' + services: + my_sample_service: + image: node + volumes: + - '${{CF_VOLUME_NAME}}:${{CF_VOLUME_PATH}}' + working_dir: '${{CF_VOLUME_PATH}}' + command: bash -c "pwd && cat sample_text.txt" + composition_candidates: + my_unit_tests: + image: python + volumes: + - '${{CF_VOLUME_NAME}}:${{CF_VOLUME_PATH}}' + working_dir: '${{CF_VOLUME_PATH}}' + command: bash -c "pwd && echo 'Finished tests' > test_result.txt" + read_test_data_step: + title: Reading dummy data + image: alpine + commands: + - ls -l /codefresh/volume + - cat /codefresh/volume/test_result.txt +{% endraw %} +{% endhighlight %} + +In this pipeline: + +1. The first freestyle step writes a simple test file in the shared volume. +1. The composition starts and both services (`my_sample_service` and `my_unit_tests`) attach the same volume. +1. The sample service reads from the shared volume (i.e. using test data that was created before). +1. The sample unit test service writes to the shared volume (emulating test results). +1. The last freestyle step reads the file that was written by the composition. + +Therefore, in this pipeline you can see both ways of data sharing, bringing files into a composition and getting results out of it. Notice that we need to mount the shared volume only in the composition services. The freestyle steps automatically mount `/codefresh/volume` on their own. + + +>Note: In order to mount the shared volume in one of your composition services, you must mount it in the `composition_candidate` also. It is not compulsory to mount the shared volume in all services of a composition. Only those that actually use it for file transfer, should mount it. + + +## Composition variables versus environment variables + +Docker compose supports [two kinds of variables in its syntax](https://docs.docker.com/compose/environment-variables/): + +* There are environment variables that are used in the docker-compose file itself (`${VAR}` syntax). +* There are environment variables that are passed in containers (`environment:` yaml group). + +Codefresh supports both kinds, but notice that variables mentioned in the +`composition_variables` yaml group refer to the *first* kind. Any variables defined there are **NOT** passed automatically to containers (use the `environment` yaml group for that purpose). 
+ + +This can be illustrated with the following example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + comp1: + type: composition + title: Composition example 1 + description: Free text description + composition: + version: '2' + services: + db: + image: alpine + composition_candidates: + test_service: + image: alpine + command: printenv + environment: + - FIRST_KEY=VALUE + composition_variables: + - ANOTHER_KEY=ANOTHER_VALUE +{% endraw %} +{% endhighlight %} + +If you run the composition, you will see that the `printenv` command shows the following: + +``` +test_service_1 | FIRST_KEY=VALUE +``` + +The `FIRST_KEY` variable, which is defined explicitly in the `environment` yaml part, is correctly passed to the alpine container. The `ANOTHER_KEY` variable is not visible in the container at all. + +You should use the `composition_variables` yaml group for variables that you wish to reuse in other parts of your composition using the `${ANOTHER_KEY}` syntax. + +## Merging services + +If the `composition` already contains a service with the same name as the `composition_candidate`, the two service definitions are combined, with preference given to the `composition_candidate`'s definition. + +For example, we create a new Codefresh composition named 'test_composition': + + `test-composition.yml` +{% highlight yaml %} +version: '2' + services: + db: + image: postgres + test_service: + image: myuser/mytestservice:latest + command: gulp integration_test +{% endhighlight %} + +Now we want to reuse this composition during our build for testing purposes. +We can add the following composition step to our `codefresh.yml` file and define the composition step so that `test_service` always uses the latest image that was built. + + `YAML` +{% highlight yaml %} +run_tests: + type: composition + composition: test_composition + composition_candidates: + test_service: + image: {% raw %}${{build_step}}{% endraw %} +{% endhighlight %} + +In the above example, both `composition` and `composition_candidates` define a service named `test_service`. After merging these definitions, `test_service` will maintain the `command` that was defined in the original composition but will refer to the image built by the step named `build_step`. + +## Related articles +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Variables]({{site.baseurl}}/docs/pipelines/variables/) +[Introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) + + diff --git a/_docs/pipelines/steps/deploy.md b/_docs/pipelines/steps/deploy.md new file mode 100644 index 000000000..6b3d4b200 --- /dev/null +++ b/_docs/pipelines/steps/deploy.md @@ -0,0 +1,185 @@ +--- +title: "Deploy" +description: "Deploying to Kubernetes from a Codefresh pipeline" +group: codefresh-yaml +sub_group: steps +redirect_from: + - /docs/deploy/ +toc: true +--- +The *Deploy* step deploys a pre-built Docker image to a cluster. + +This step allows you to (re)deploy a Kubernetes application in your cluster. + +It has two modes: + +1. Using the `service` option. In this case it will redeploy to an [existing service/deployment in your cluster]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/). Codefresh will +automatically update the service/deployment with the new docker image. +1. Using the `file_path` option. In this case you provide your own Kubernetes manifest and Codefresh deploys it as-is.
It is **your +own responsibility** to do [custom replacements]({{site.baseurl}}/docs/deploy-to-kubernetes/kubernetes-templating/) here (for example using [awk](https://en.wikipedia.org/wiki/AWK), [sed](https://www.gnu.org/software/sed/manual/sed.html) or [yq](http://mikefarah.github.io/yq/)). The deploy step is also using the [Codefresh templating mechanism]({{site.baseurl}}/docs/deploy-to-kubernetes/kubernetes-templating/#using-the-codefresh-deploy-image) behind the scenes if you want to take advantage of it. For a full templating solution we also +suggest you look at [Helm]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) . + +You need to define either one of these fields in the deploy step. If you define `service` you also can select the exact Docker image +with the `candidate` field (otherwise Codefresh will just reuse the docker image defined in the existing deployment) + +## Usage + + `YAML` +{% highlight yaml %} + step_name: + title: deploying to cluster + type: deploy + kind: kubernetes + ## cluster name as the shown in account's integration page + cluster: --my-cluster-name-- + # desired namespace + namespace: default + + ## Two ways to distinguish which deployment YAML to deploy - service or file_path: + # The Kubernetes service that associated with the deployment using selector + service: --my-service-- + # Path to deployment.yml location inside the image volume + file_path: ./deployment.yml + # In seconds, how long the step will wait until the rolling update is complete (default is 120) + timeout: '150' + # Candidate is optional, if not specified will redeploy the same image that specified in the deployment file + # When candidate exists it should have both: image and registry + candidate: + # The image that will replace the original deployment image + # The image that been build using Build step + image: {% raw %}${{build_step}}{% endraw %} + # The registry that the user's Kubernetes cluster can pull the image from + # Codefresh will generate (if not found) secret and add it to the deployment so the Kubernetes master can pull it + registry: dockerhub + # Condition to run the step + when: + branch: + only: + - master + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------------------- -------- | ------------------------- | +| `title` | The free-text display name of the step | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `kind` | Currently only `kubernetes` is supported | Required | +| `cluster` | Name of your K8s cluster as found in the dashboard | Required | +| `namespace` | Namespace where the deployment will take place | Required | +| `service` | Name of the existing service that will updated. You need to provide `service` OR `file_path` | Required/Optional | +| `file_path` | A deployment manifest. You need to provide `service` OR `file_path` | Required/Optional | +| `timeout` | Seconds to wait for the deployment to be completed. Default is 120 seconds | Default | +| `candidate` | Docker image that will be deployed. Only valid if `service` is defined. 
Should contain `image` and the name of the registry as it appears in the [registry integration page]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). | Optional |
+| `fail_fast` | If a step fails, the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
            You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/piplines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +## Examples + +Update an existing service using the same Docker image (tagged with branch) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: python-flask-sample-app + working_directory: ./ + tag: ${{CF_BRANCH_TAG_NORMALIZED}} + dockerfile: Dockerfile + deploy_to_k8: + title: deploying to cluster + type: deploy + kind: kubernetes + cluster: myDemoAKSCluster + namespace: demo + service: my-python-app +{% endraw %} +{% endhighlight %} + +Update an existing service using a different Docker image (tagged with git hash) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: python-flask-sample-app + working_directory: ./ + tag: ${{CF_SHORT_REVISION}} + dockerfile: Dockerfile + deploy_to_k8: + title: deploying to cluster + type: deploy + kind: kubernetes + cluster: myDemoAKSCluster + namespace: demo + service: my-python-app + candidate: + # The image that will replace the original deployment image + # The image that been build using Build step + image: ${{MyAppDockerImage}} + # The registry that the user's Kubernetes cluster can pull the image from + # Codefresh will generate (if not found) secret and add it to the deployment so the Kubernetes master can pull it + registry: cfcr +{% endraw %} +{% endhighlight %} + + +Deploy a custom Kuberentes Manifest as is. 
(Only a deployment will be created) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: python-flask-sample-app + working_directory: ./ + tag: ${{CF_BRANCH}} + dockerfile: Dockerfile + deploy_to_k8: + title: deploying to cluster + type: deploy + kind: kubernetes + cluster: myDemoAKSCluster + namespace: demo + file_path: ./deploy/deployment.yml +{% endraw %} +{% endhighlight %} + +## Advanced Kubernetes deployments + +If you find the deploy step limited, feel free to look at the other deployment options offered by Codefresh: + +* [The cf-deploy-kubernetes step]({{site.baseurl}}/docs/deploy-to-kubernetes/kubernetes-templating/) +* [Custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/) +* [Helm]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm/) + +## Related articles +[Kubernetes Quick start guide]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) +[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/) +[Install HELM chart using Codefresh pipeline]({{site.baseurl}}/docs/new-helm/using-helm-in-codefresh-pipeline/) + + + diff --git a/_docs/pipelines/steps/freestyle.md b/_docs/pipelines/steps/freestyle.md new file mode 100644 index 000000000..6a1a8f2ee --- /dev/null +++ b/_docs/pipelines/steps/freestyle.md @@ -0,0 +1,350 @@ +--- +title: "Freestyle" +description: "Run commands inside a Docker container" +group: pipelines +sub_group: steps +toc: true +--- +The Freestyle step is designed so you can execute a series of commands in a container. Freestyle steps +are the bread and butter of [Codefresh pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/). + +## Purpose of freestyle steps + +In Codefresh, docker containers are first-class citizens +and special typed steps are offered for the most usual docker commands. Freestyle steps are a secure replacement for `docker run` commands. + + +Therefore, this command on your local workstation: + +``` +docker run python:3.6.4-alpine3.6 pip install . +``` + +will become in Codefresh the following freestyle step. + +```yaml +CollectAllMyDeps: + title: Install dependencies + image: python:3.6.4-alpine3.6 + commands: + - pip install . +``` + + +Select an image to start a container, then you can specify a working directory, and commands. +If you do not specify a working directory or commands, the step runs the organic commands specified by the image. +In all freestyle steps Codefresh automatically [uses a shared docker volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) that contains your git source code. + +## Usage + + `YAML` +{% highlight yaml %} +{% raw %} +step_name: + title: Step Title + description: Step description + image: image/id + working_directory: ${{step_id}} + commands: + - bash-command1 + - bash-command2 + cmd: + - arg1 + - arg2 + environment: + - key=value + entry_point: + - cmd + - arg1 + shell: sh + fail_fast: false + volumes: + - ./relative-dir-under-cf-volume1:/absolute-dir-in-container1 + - ./relative-dir-under-cf-volume2:/absolute-dir-in-container2 + when: + branch: + only: [ master ] + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... 
+{% endraw %} +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional | +| `image` | The image from which the executable container is created. It can be an explicit ID of a Docker image, or a variable that references a **Build** or **Push** step. | Required | +| `working_directory` | The directory from which the commands are executed. It can be an explicit path in the container's file system, or a variable that references another step. The default `working_directory` is the cloned repository directory and not the working directory specified by the image. If you need to use the default working directory of the image use `IMAGE_WORK_DIR`. | Default | +| `commands` | One or more commands to execute in a shell in the container, as array of strings. | Optional | +| `cmd` | docker CMD arguments to use along with the container entry point. can be string or array of strings. | Optional | +| `entry_point` | Override the default container entry point. can be string or array of strings. | Optional | +| `shell` | Explicitly set the executing shell to bash or sh. If not set the default will be sh. Note the `bash` option requires that you specify an `image` that includes `/bin/bash`; many images do not. | Optional | +| `environment` | A set of environment variables for the container. | Optional | +| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default | +| `registry_context` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +| `volumes` | One or more volumes for the container. All volumes must be mounted from the existing shared volume (see details below) |Optional +| `when` | Define a set of conditions that need to be satisfied in order to execute this step. You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +**Exported resources:** +- Working Directory. + +## Examples + +Here are some full pipelines with freestyle steps. 
Notice that in all cases the pipelines are connected to [git repositories]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-creation-modes) +so the source code is already checked out and available to all pipeline steps. + +**Creating a [JAR file]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/):** + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_jar_compilation: + title: Compile/Unit test + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package +{% endhighlight %} + +Note how we [cache Maven dependencies]({{site.baseurl}}/docs/example-catalog/ci-examples/java/spring-boot-2/#caching-the-maven-dependencies) using the internal Codefresh Volume. + +**Running unit tests in [Node.JS]({{site.baseurl}}/docs/example-catalog/ci-examples/nodejs/):** + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_node_app: + title: Running unit tests + image: node:11 + commands: + - npm install + - npm run test +{% endhighlight %} + +**Packaging a [GO application]({{site.baseurl}}/docs/example-catalog/ci-examples/golang-hello-world/):** + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_go_app: + title: Compiling GO code + image: golang:1.7.1 + commands: + - go get github.com/example-user/example-repo + - go build +{% endhighlight %} + +**Performing a [blue/green deployment](https://github.com/codefresh-io/k8s-blue-green-deployment):** + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + blueGreenDeploy: + title: Deploying new version + image: codefresh/k8s-blue-green:master + environment: + - SERVICE_NAME=my-demo-app + - DEPLOYMENT_NAME=my-demo-app + - NEW_VERSION=${{CF_SHORT_REVISION}} + - HEALTH_SECONDS=60 + - NAMESPACE=colors + - KUBE_CONTEXT=myDemoAKSCluster +{% endraw %} +{% endhighlight %} + +## Dynamic freestyle steps + +Codefresh has the unique ability to allow you to run freestyle steps in the context of a docker image +created on the same pipeline. This means that you can dynamically [create docker images]({{site.baseurl}}/docs/pipelines/steps/build/) on demand within the pipeline +that needs them. + +Creating a custom docker image with extra tools (Terraform and Ansible) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + CreateMyCustomImage: + title: Creating custom Docker image + type: build + dockerfile: tf_and_ansible.Dockerfile + image_name: my-iac-tools-container + UseMyCustomImage: + title: Running IAC tools + image: ${{CreateMyCustomImage}} + commands: + - terraform --version + - ansible --version +{% endraw %} +{% endhighlight %} + +Here the `UseMyCustomImage` freestyle step is running in the [context]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables) of the Docker image that was created in the previous step. +In fact, a very common pattern that you will see in Codefresh pipelines is the executions of [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) in the image that was created in a build step: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - ./my-unit-tests.sh +{% endraw %} +{% endhighlight %} + +Here the `MyAppDockerImage` step is creating a custom docker image. That image is used to run the `MyUnitTests` step. 
+This pattern works very well for cases where testing tools are already part of the image (usually with dynamic languages). +In other case you can have a second Dockerfile in your application that is designed explicitly to hold all your testing tools. + +## Entry point + +When using the original container entry point, you can use the `cmd` field to specify additional arguments to be used with the entry point. This can be a string, or an array of strings. For example: + +```yaml +image: mwendler/cowsay +cmd: + - "Hello" +``` + +is equivalent to running `docker run mwendler/cowsay Hello` which is equivalent to running `cowsay Hello` inside the container. + + +You can override the container's default entry point using the `entry_point` field. This can be a string, or an array of strings. For example: + +```yaml + +image: mwendler/cowsay +entry_point: + - echo + - Hello +``` + +## Commands + +When you use the `commands` field, it will override the container original `entry_point` and will execute the commands in a shell inside the container. +The provided commands are concatenated into a single command using the shell's `;` operator, and are run using the default shell `/bin/sh` as an entry point. +Additional settings that are set only when using commands are `set -e`, and the [`cf_export`]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) utility. + +> Using complex commands in the freestyle step requires use of [YAML block scalars](http://stackoverflow.com/questions/3790454/in-yaml-how-do-i-break-a-string-over-multiple-lines). + +### Commands and Entry point + +If you want to retain the original entry point, do not use the `commands` field. + +However, this example: + +```yaml +image: mwendler/cowsay +commands: + - "Hello" +``` + +will cause and error because the engine will attempt to run the command `Hello` in a shell inside the container, and the command `Hello` is not a valid command. +In order to use the `commands` form with an `entrypoint` enabled container, you can add the commands from the entry point to the list of commands, like so: + +```yaml +image: mwendler/cowsay +commands: + - cowsay "Hello" +``` + +## Custom volumes + +If you are familiar with [Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) you should know that all freestyle steps automatically share a [volume](https://docs.docker.com/storage/) mounted at `/codefresh/volume` which can be used to transfer data (e.g. dependencies and test results) from each step to the next. + +**This volume is automatically mounted by Codefresh and needs no configuration at all**. All you have to do to access it, is read/write the `/codefresh/volume` folder from your application. This folder also [includes by default the source code]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) of the git repository connected to the pipeline (at the `/codefresh/volume/` subfolder) + +You can use the `volumes` property to create your own custom volumes that can be mounted in different folders. **For security reasons however all source volume data (i.e. the "host" folder) still needs to be bound with `/codefresh/volume` or any of its subdirectories**: + +Attempting to mount a folder outside of `/codefresh/volume` will result in an error. + +### Simple volume example + +Let's assume that your application expects to find a configuration folder at `/config`. 
The folder however that contains the needed files in GIT is under `my-app-repo/my-sample-config`. When the application is checked out the files actually reside at `/codefresh/volume/my-app-repo/my-sample-config`. + +You can still run your application without any code changes by doing the following bind: + +```yaml +title: Running my application with custom volume +image: my-docker-app:latest +volumes: + - ./my-app-repo/my-sample-config:/config # host path is relative to /codefresh/volume +``` + +Now the `my-docker-app` application will run and find all its needed files at `/config`. + +Notice that we use a relative path here but even if you used an absolute one (`/my-app/my-sample-config`) the result would be the same because Codefresh does not allow you to bind anything outside the shared Codefresh volume. + +### Injecting custom folders in a running container + +Here is another example pipeline with two steps. The first one creates a custom config file in the shared Codefresh volume (that is always available) at `/codefresh/volume/my-config`. The second step reads the config file at a different folder in `/my-own-config-folder-injected`. + +```yaml +version: '1.0' +steps: + CreateCustomConfiguration: + title: Creating configuration + image: alpine + commands: + - mkdir -p /codefresh/volume/my-config + - echo "foo=bar" > /codefresh/volume/my-config/custom.txt + - ls /codefresh/volume/my-config + InjectConfiguration: + title: Reading configuration + image: alpine + commands: + - ls /codefresh/volume/my-config # Codefresh default volume shared between all steps + - ls /my-own-config-folder-injected # Special volume just for this container + - cat /my-own-config-folder-injected/custom.txt + volumes: + - ./my-config:/my-own-config-folder-injected +``` + +When the second steps runs, the `custom.txt` file is available both at `/codefresh/volume/my-config` (the shared volume of all steps) as well as the `/my-own-config-folder-injected` folder which was mounted specifically for this step. + + +## More freestyle steps + +You can use in a freestyle step any Docker image available in a public repository such as Dockerhub. This makes the integration of Codefresh and various cloud tools very easy. + +Codefresh also offers a plugin directory at [http://codefresh.io/steps/](http://codefresh.io/steps/) created specifically for CI/CD operations. + +{% include +image.html +lightbox="true" +file="/images/pipeline/plugin-directory.png" +url="/images/pipeline/plugin-directory.png" +alt="Codefresh steps directory" +caption="Codefresh steps directory" +max-width="80%" +%} + + +## Related articles +[Introduction to Pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) + diff --git a/_docs/pipelines/steps/git-clone.md b/_docs/pipelines/steps/git-clone.md new file mode 100644 index 000000000..a457596c3 --- /dev/null +++ b/_docs/pipelines/steps/git-clone.md @@ -0,0 +1,439 @@ +--- +title: "Git-Clone" +description: "Checkout code in your pipelines" +group: pipelines +sub_group: steps +redirect_from: + - /docs/git-clone/ +toc: true +--- +Clones a Git repository to the filesystem. + +A pipeline can have any number of Git clone steps (even none). You can checkout code from any private or public repository. Cloning a repository is not constrained to the trigger of a pipeline. 
You can trigger a pipeline from a commit that happened on Git repository A while the pipeline is checking out code from Git Repository B. + +>Notice that if you are an existing customer before May 2019, Codefresh will automatically checkout the code from a [connected git repository]({{site.baseurl}}/docs/integrations/git-providers/) when a pipeline is created on that repository. In this case an implicit git clone step is included in your pipeline. You can still override it with your own git clone step as explained in this page + +## Usage + + `YAML` +{% highlight yaml %} +step_name: + type: git-clone + title: Step Title + description: Step description + working_directory: /path + repo: owner/repo + git: my-git-provider + revision: abcdef12345' + use_proxy: false + credentials: + username: user + password: credentials + fail_fast: false + when: + branch: + ignore: [ develop ] + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional | +| `working_directory` | The directory to which the repository is cloned. It can be an explicit path in the container's file system, or a variable that references another step. The default value is {% raw %}`${{main_clone}}`{% endraw %}, but note that the default will only be used if you name your step `main_clone`. See the example on [working inside the cloned directory]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout/#working-inside-the-cloned-directory) for more information. | Default | +| `git` | The name of the [git integration]({{site.baseurl}}/docs/integrations/git-providers/) you want to use. If left empty, Codefresh will attempt to use the git provider that was used during account sign-up. Note that this might have unexpected results if you are changing your Git integrations.| Required| +| `repo` | path of the repository without the domain name in the form of `my_username/my_repo` | Required | +| `revision` | The revision of the repository you are checking out. It can be a revision hash or a branch name. The default value is the branch you have specified in your Git provider (e.g `master` or `main`). | Default | +| `depth` | The number of commits to pull from the repo to create a shallow clone. Creating a shallow clone truncates the history to the number of commits specified, instead of pulling the entire history. | Optional | +| `use_proxy` | If set to true the Git clone process will honor `HTTP_PROXY` and `HTTPS_PROXY` variables if present for [working via a proxy](#using-git-behind-a-proxy). Default value is `false`. | Default | +| `credentials` | Credentials to access the repository, if it requires authentication. It can an object containing `username` and `password` fields. Credentials are optional if you are using the [built-in git integrations]({{site.baseurl}}/docs/integrations/git-providers/) . 
| Optional | +| `fail_fast` | If a step fails and the process is halted. The default value is `true`. | Default | +| `when` | Define a set of conditions that need to be satisfied in order to execute this step. You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/codefresh-yaml/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +**Exported resources:** +- Working Directory + +{{site.data.callout.callout_info}} +If you want to extend the git-clone step you can use the freestyle step. Example how to do it you can find [here]({{site.baseurl}}/docs/yaml-examples/examples/git-clone-private-repository-using-freestyle-step/) +{{site.data.callout.end}} + +## Basic clone step (project-based pipeline) + +The easiest way to use a git clone step is to use your default git provider as configured in [built-in git integrations]({{site.baseurl}}/docs/integrations/git-providers/). + +Here is an example of a pipeline that will automatically check out the repository that triggered it (i.e. a commit happened on that repository). + +>Notice that the name of the clone step is `main_clone`. This will automatically set the working directory of all other steps that follow it **inside** the folder of the project that was checked out. This only applies to [built-in]({{site.baseurl}}/docs/pipelines/steps/#built-in-steps) Codefresh steps and not [custom plugins]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin). This is normally what you want for a pipeline that only checks out a single project. If you use any other name apart from `main_clone` the working directory for all subsequent steps will not be affected and it will default on the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) which is the [parent folder]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) of checkouts. + + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: my-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +The CF values will be automatically filled by Codefresh from the git trigger. See the [variables page]({{site.baseurl}}/docs/pipelines/variables/) for more details. + +## Choosing a specific git provider (project-based pipeline) + +If you don't want to use the default git provider you can explicitly set the provider by using the same name of the integration as it is shown in [the git integrations page]({{site.baseurl}}/docs/integrations/git-providers/). 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/example-git-providers.png" +url="/images/pipeline/codefresh-yaml/steps/example-git-providers.png" +alt="Example git integrations" +caption="Example git integrations" +max-width="40%" +%} + +Here is an example for an integration with the GitLab provider already connected: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: my-gitlab + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +## Checkout a specific repository/revision (project based pipeline) + +If you want to check out a specific git repository regardless of what repository actually created the trigger +you can just define all values in a non-static manner. For example, if you want your pipeline to always checkout git repository `foo` even when the trigger happened from repository `bar` you can define the checkout step as below: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: 'my-github-username/foo' + revision: '${{CF_REVISION}}' + git: my-github-integration + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +In a similar manner you can also define that the pipeline will always checkout master, regardless of the commit that actually triggered it. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: 'master' + git: my-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +## Checkout code using the Codefresh Runner + +If you are using the [Codefresh runner]({{site.baseurl}}/docs/installation/codefresh-runner/), you need to use +the fully qualified path of the Git repository: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: https://github-internal.example.com/my-username/my-app + revision: '${{CF_REVISION}}' + git: my-internal-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +More details can be found in the [private Git instructions page]({{site.baseurl}}/docs/reference/behind-the-firewall/#checking-out-code-from-a-private-git-repository). + + +## Checking out multiple Git repositories + +It is very easy to checkout additional repositories in a single pipeline by adding more `git-clone` steps. +In that case you should use different names for the steps (instead of `main_clone`) as this will make the working +folder for all steps the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_checkout: + title: 'Cloning first repository...' + type: git-clone + repo: 'my-gitlab-username/foo' + revision: '${{CF_REVISION}}' + git: my-gitlab-integration + my_second_checkout: + title: 'Cloning second repository...' 
+ type: git-clone + repo: 'my-github-username/bar' + revision: '${{CF_REVISION}}' + git: my-github-integration + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + + +## Skip or customize default clone (repository-based pipeline) + +If you have existing pipelines connected to repositories (only for Codefresh accounts created before May 2019) +a git clone step is transparently added to git attached pipelines without you having to explicitly add a step into the pipeline. This is a convenience to enable easy CI pipelines. +If you do not require git cloning, or you would like to customize the implicit git cloning behavior, you can choose to skip the automatically added git clone step. + +There are 2 ways to do that: + +1. Add a pipeline environment variable called `CF_SKIP_MAIN_CLONE` with value of `true`. + +-or- + +2. Add a step with key `main_clone` to your pipeline. This step can be of any type and can do any action. This step will override the default clone implementation. for example: + +```yaml +version: '1.0' +steps: + main_clone: + title: Checking out code + image: alpine/git:latest + commands: + - git clone ... + another_step: + ... +``` + +## Reuse a Git token from Codefresh integrations + +You also have the capability to use one of your existing [git integrations]({{site.baseurl}}/docs/integrations/git-providers/) +as an authentication mechanism. + +The [Codefresh CLI](https://codefresh-io.github.io/cli/) can read one of the connected [git authentication contexts](https://codefresh-io.github.io/cli/contexts/get-context/) and use that token for a custom clone step. + +Here is an example for GitHub + + +```yaml +version: '1.0' +steps: + get_git_token: + title: Reading GitHub token + image: codefresh/cli + commands: + - cf_export GITHUB_TOKEN=$(codefresh get context github --decrypt -o yaml | yq -r .spec.data.auth.password) + main_clone: + title: Checking out code + image: alpine/git:latest + commands: + - git clone https://my-github-username:$GITHUB_TOKEN@github.com/my-github-username/my-repo.git + another_step: + ... +``` + +## Working with GIT submodules + +To checkout a git project including its submodules you can use the [Codefresh submodule plugin](https://github.com/codefresh-io/plugins/tree/master/plugins/gitsubmodules). This plugin is already offered as a public docker image at [Dockerhub](https://hub.docker.com/r/codefresh/cfstep-gitsubmodules/tags). + +To use this module in your pipeline, add a new step like the one shown below. + +```yaml +version: '1.0' +steps: + updateSubmodules: + image: codefresh/cfstep-gitsubmodules + environment: + - GITHUB_TOKEN= + - CF_SUBMODULE_SYNC= + - CF_SUBMODULE_UPDATE_RECURSIVE= +``` + +The GitHub token can be either defined in the pipeline on its own as an environment variable, or fetched from +the existing [GIT integration]({{site.baseurl}}/docs/integrations/git-providers/) as shown in the previous section. 
+ +Here is full pipeline example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - prepare + - build +steps: + clone: + title: Cloning the repository + type: git-clone + stage: checkout + arguments: + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + git: github + revision: '${{CF_REVISION}}' + + updateSubmodules: + image: codefresh/cfstep-gitsubmodules + stage: prepare + working_directory: '${{clone}}' + environment: + - GITHUB_TOKEN=${{MY_GITHUB_TOKEN}} + docker_build: + title: Building docker image + type: build + stage: build + working_directory: '${{clone}}/k8s/docker' + tag: current + disable_push: true + image_name: 'my-docker-image' + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the main source code +1. Updates submodules +1. Creates a docker image + + +## Use an SSH key with Git + +It is also possible to use an SSH key with git. When [creating your pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) add your SSH key as an encrypted +environment variable after processing it with `tr`: + +``` +cat ~/.ssh/my_ssh_key_file | tr '\n' ',' +``` + + +Then in the pipeline use it like this: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Checking out code + image: alpine/git:latest + commands: + - mkdir -p ~/.ssh + - echo "${SSH_KEY}" | tr \'"${SPLIT_CHAR}"\' '\n' > ~/.ssh/id_rsa + - chmod 600 ~/.ssh/id_rsa + - git clone git@github.com:my-github-username/my-repo.git + # can also use go get or other similar command that uses git internally + another_step: + ... +{% endraw %} +{% endhighlight %} + +## Using Git behind a proxy + +If you use the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) and need to use a network proxy in your clone step you need to set the [variables]({{site.baseurl}}/docs/pipelines/variables/) `HTTP_PROXY` and/or `HTTPS_PROXY` in the pipeline +and then activate the property `use_proxy: true` in the clone step. Example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "https://github.com/my-github-user/my-repo/" + revision: "master" + use_proxy: true + git: my-git-provider +{% endraw %} +{% endhighlight %} + +For setting the values of the proxy variables you can use any of the supported methods for defining variables such as [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/). + + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/proxy-variables.png" +url="/images/pipeline/codefresh-yaml/steps/proxy-variables.png" +alt="Pipeline variable" +caption="Pipeline variable" +max-width="40%" +%} + +For more details see the [behind the firewall page]({{site.baseurl}}/docs/installation/behind-the-firewall/). 
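+
+If you prefer to define the proxy variables inside the pipeline definition itself, one possible approach (shown here only as a sketch) is to export them from an earlier freestyle step with [`cf_export`]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) so that they are available when the clone step runs. The proxy address `http://proxy.example.com:8080` below is a placeholder and not a real endpoint:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  export_proxy_settings:
+    title: "Exporting proxy variables"
+    image: alpine
+    commands:
+      # http://proxy.example.com:8080 is a placeholder - replace it with your own proxy address
+      - cf_export HTTP_PROXY=http://proxy.example.com:8080
+      - cf_export HTTPS_PROXY=http://proxy.example.com:8080
+  clone:
+    title: "Cloning repository"
+    type: "git-clone"
+    repo: "https://github.com/my-github-user/my-repo/"
+    revision: "master"
+    use_proxy: true
+    git: my-git-provider
+{% endraw %}
+{% endhighlight %}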
+ + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Git integrations]({{site.baseurl}}/docs/integrations/git-providers/) +[YAML steps]({{site.baseurl}}/docs/pipelines/steps/) +[Git Checkout Examples]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout/) +[Custom Git Commands]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout-custom/) + + + + + + diff --git a/_docs/pipelines/steps/launch-composition.md b/_docs/pipelines/steps/launch-composition.md new file mode 100644 index 000000000..a3bce2d5c --- /dev/null +++ b/_docs/pipelines/steps/launch-composition.md @@ -0,0 +1,92 @@ +--- +title: "Launch-Composition" +description: "Create a test environment with its dependencies in Codefresh infrastructure" +group: pipelines +sub_group: steps +redirect_from: + - /docs/launch-composition-2/ + - /docs/codefresh-yaml/steps/launch-composition-2/ +toc: true +--- +The Launch Composition step provides the ability to launch long term running environments that can live outside the context of a running pipeline. +You can use this step to automate your test environment creation through a codefresh.yml file instead of manually launching an environment from the UI. + +>Note that "launch-composition" creates a permanent test environment that keeps running even after a pipeline has finished. If you just want temporary test environments that run *only while* a pipeline is running, see [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) and the documentation page for [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). + +## Usage + + `ui defined composition` +{% highlight yaml %} +step_name: + title: Step Title + type: launch-composition + composition: 'ui_defined_composition_name' + environment_name: 'environment name' + on_success: + ... + on_fail: + ... + on_finish: + ... +{% endhighlight %} + + `inline composition` +{% highlight yaml %} +step_name: + type: launch-composition + composition: + version: '2' + services: + app: + image: owner/app:latest + db: + image: mongo + environment_name: 'environment name' + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + + `from file composition` +{% highlight yaml %} +step_name: + type: launch-composition + working_directory: ${{a_clone_step}} + composition: './path/to/docker-compose.yaml' + environment_name: 'environment name' + on_success: + ... + on_fail: + ... + on_finish: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional | +| `working_directory` | The directory in which to search for the composition file. It can be an explicit path in the container's file system, or a variable that references another step.
            The default is {% raw %}`${{main_clone}}`{% endraw %}. | Default |
+| `composition` | The composition you want to run. It can be an inline YAML definition, a path to a composition file on the file system, or the logical name of a composition stored in the Codefresh system. | Required |
+| `environment_name` | The environment name that will be given. If a previous environment exists with the same name, it will first be terminated. The default value will be the name/path provided in the `composition` field. | Default |
+| `composition_variables` | A set of environment variables to substitute in the composition. | Optional |
+| `fail_fast` | If a step fails, the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
            You can find more information in the [[Conditional Execution of Steps]({{ site.baseurl }}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{ site.baseurl }}/docs/pipelines/post-step-operations/). | Optional | +| entry_point | The name of main service | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +## Related articles +[Preview environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/) +[Launch Composition example]({{site.baseurl}}/docs/yaml-examples/examples/launch-composition/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/) \ No newline at end of file diff --git a/_docs/pipelines/steps/push.md b/_docs/pipelines/steps/push.md new file mode 100644 index 000000000..699f02004 --- /dev/null +++ b/_docs/pipelines/steps/push.md @@ -0,0 +1,257 @@ +--- +title: "Push step" +description: "Pushing Docker images from your pipeline" +group: pipelines +sub_group: steps +redirect_from: + - /docs/push-1/ + - /docs/codefresh-yaml/steps/push-1/ +toc: true +--- + +{{site.data.callout.callout_info}} + +If you use only the default Docker registry of your account this step is optional as all successful Codefresh pipelines automatically push the Docker image they create in the default Docker registry. No further configuration is needed to achieve this behavior. +{{site.data.callout.end}} + +Push a built image to a remote Docker registry with one or more tags. Supports standard Docker registries and ECR. + +Notice that when you use [any external registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/), you need to comply to the naming pattern used by that registry, otherwise the build step will fail. For example, if your Codefresh image is tagged as `foo_username/my_image` but your Dockerhub account is `bar_username` then the build will fail and you need to customize the push step to use `bar_username` instead. This is a limitation of external registries such as Dockerhub. + +## Usage + + `YAML` +{% highlight yaml %} +step_name: + type: push + title: Step Title + description: Free text description + candidate: {% raw %}${{build_step_name}}{% endraw %} + tag: latest + image_name: codefresh/app + registry: my-registry + fail_fast: false + when: + branch: + only: + - /FB-/i + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... + +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional | +| `candidate` | The identifier of the image to push to the remote Docker registry. It can be an explicit identifier of an image to push, or a variable that references a `Build` step. 
| Required | +| `tag` | The tag under which to push the image. Use either this or `tags`.
            The default is `latest`. | Default |
+| `region` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The names of the regions for which to perform cross-region replication. The source region and the destination region must be defined in separate steps. | Optional |
+| `role_arn` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The role with the permissions required to pull the image. For example, `arn:aws:iam:::role/` | Required |
+| `aws_session_name` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The name of the AWS session. If not defined, `default-session-name` is used. | Default |
+| `aws_duration_seconds` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The length of time, in seconds, for which the role credentials are considered valid; must be between `900-3600` seconds. If not defined, the duration defaults to `3600` seconds. | Default |
+| `tags` | Multiple tags under which to push the image. Use either this or `tag`. This is an array, so should be of the following style:
            {::nomarkdown}
            tags:
            - tag1
            - tag2
            - {% raw %}${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}{% endraw %}
            - tag4
            {:/}or
            {::nomarkdown}
            tags: [ 'tag1', 'tag2', '{% raw %}${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}{% endraw %}', 'tag4' ]
            {:/} | Default | +| `image_name` | The tagged image name that will be used The default value will be the same image name as of the candidate. | Default | +| `registry` | The registry logical name of one of the inserted registries from the integration view.
            The default value will be your default registry [if you have more than one]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). | Default |
+| `registry_context` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional |
+| `fail_fast` | If a step fails, the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
            You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +## Examples + +Push an image to a registry connected with the [integration name]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) of `myazureregistry`. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + pushToMyRegistry: + stage: 'my push phase' + type: push + title: Pushing to a registry + candidate: ${{MyAppDockerImage}} + tag: ${{CF_SHORT_REVISION}} + registry: myazureregistry +{% endraw %} +{% endhighlight %} + +Push an image as the name of the branch in the [external registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) and also use a different image than the default. The same image will also by pushed as `latest` in the internal Codefresh registry (with the default name of `my-app-image`). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + tag: latest + pushToMyRegistry: + stage: 'my push phase' + type: push + title: Pushing to a registry + candidate: ${{MyAppDockerImage}} + tag: ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}} + registry: myazureregistry + image_name: my-user-name/a-different-image-name +{% endraw %} +{% endhighlight %} + + +Push an image with multiple tags. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + pushToMyRegistry: + stage: 'my push phase' + type: push + title: Pushing to a registry + candidate: ${{MyAppDockerImage}} + tags: + - ${{CF_SHORT_REVISION}} + - latest + - 2.0.0 + registry: myazureregistry +{% endraw %} +{% endhighlight %} + +Push an image with multiple tags to multiple Docker registries in [parallel]({{site.baseurl}}/docs/pipelines/advanced-workflows/). +Both registries are connected first in the [integrations page]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). 
+ + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + PushingToRegistries: + type: parallel + stage: 'push' + steps: + PushingToGoogleRegistry: + type: push + title: Pushing To Google Registry + candidate: ${{MyAppDockerImage}} + tags: + - ${{CF_BUILD_ID}} + - latest + - production + registry: gcr + PushingToDockerRegistry: + type: push + title: Pushing To Dockerhub Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + image_name: my-docker-hub-username/my-app-name + registry: dockerhub +{% endraw %} +{% endhighlight %} + + +## Using passed credentials without pre-saving them + +This option enables you to push your images without pre-saving the credentials in Codefresh's registry integration view. + +>Note that this method of pushing images is offered as a workaround. The suggested way is to use the [central Codefresh integration for registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) as explained in the previous section. + + `YAML` +{% highlight yaml %} +step_name: + type: push + title: Step Title + description: Free text description + candidate: {% raw %}${{build_step_name}}{% endraw %} + tags: [ latest, {% raw %}${{CF_BRANCH}}{% endraw %} ] + image_name: codefresh/app + registry: dtr.host.com + credentials: + username: subject + password: credentials + fail_fast: false + when: + branch: + only: + - /FB-/i + on_success: + ... + on_fail: + ... + on_finish: + ... +{% endhighlight %} + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ---------------------------- | ------------------------------------ | ----------------------------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `provider` | The type of Docker registry provider. Can currently be either `docker` for a standard Docker registry, or `ecr` for the [Amazon EC2 Container Registry (ECR)](https://aws.amazon.com/ecr/). | Optional
            *Default value*: `docker` | +| `candidate` | The identifier of the image to push to the remote Docker registry. It can be an explicit identifier of an image to push, or a variable that references a `Build` step. | Required | +| `tag` | The tag under which to push the image. Use either this or `tags`.
            The default is `latest`. | Default |
+| `tags` | Multiple tags under which to push the image. Use either this or `tag`.
            This is an array, so should be of the following style:
            {::nomarkdown}
            tags:
            - tag1
            - tag2
            - {% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}
            - tag4
            {:/}or
            {::nomarkdown}
            tags: [ 'tag1', 'tag2', '{% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}', 'tag4' ]
            {:/} | Default | +| `image_name` | The tagged image name that will be used. The default value will be the same image name as of the candidate. | Default | +| `registry` | The host address where the registry is located. The default is the registry configured in your Codefresh account, or Dockerhub. | Default
            **Ignored when provider is** `ecr` |
+| `credentials` | Credentials to access the registry if it requires authentication. It can be a hash object containing `username` and `password` fields. The default is the credentials configured in your Codefresh account. | Optional
            **Ignored when provider is** `ecr` | +| `accessKeyId` | Your AWS access key. | Optional
            **Ignored when provider is** `docker` | +| `secretAccessKey` | Your AWS secret access key. | Optional
            **Ignored when provider is** `docker` | +| `region` | The region where the ECR registry is accessible. | Optional
            **Ignored when provider is** `docker` |
+| `fail_fast` | If a step fails, the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
            You can find more information in [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/).| Optional | + +**Exported resources:** +- Image ID. + +## Related articles +[External Registry integrations]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +[Custom Image annotations]({{site.baseurl}}/docs/docker-registries/metadata-annotations/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) \ No newline at end of file diff --git a/_docs/pipelines/triggers.md b/_docs/pipelines/triggers.md new file mode 100644 index 000000000..19137145f --- /dev/null +++ b/_docs/pipelines/triggers.md @@ -0,0 +1,114 @@ +--- +title: "Triggers in pipelines" +description: "Choose when your pipelines should run" +group: pipelines +redirect_from: + - /docs/pipeline-triggers/ + - /docs/pipeline-triggers/introduction-triggers/ +toc: true +--- + + +To create an effective CI/CD process, it should be possible to trigger a Codefresh pipeline execution not only on code repository events (like `push` or `PR`), but also on any "interesting" CD-related event, coming from some external system. + +Codefresh not only allows you to define different pipelines on a single project but it also offers you the capability to trigger them with completely separate mechanisms. + + +## Pipeline trigger types + +The following types of triggers are currently supported pipelines: + +* [Git triggers](git-triggers) +* [Dockerhub triggers](dockerhub-triggers) +* [Azure Registry triggers](azure-triggers) +* [Quay triggers](quay-triggers) +* [Helm triggers](helm-triggers) +* [Artifactory triggers](jfrog-triggers) +* [Cron trigger](cron-triggers) +* [API/CLI trigger]({{site.baseurl}}/docs/integrations/codefresh-api/) + +As an example, this project contains four pipelines: + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/pipeline-examples.png" +url="/images/pipeline/triggers/pipeline-examples.png" +alt="Sample pipelines" +caption="Sample pipelines" +max-width="70%" +%} + +Behind the scenes these pipelines are triggered from different events: + +* Pipeline "CI-build" uses a GIT trigger and starts after every commit to the code repository +* Pipeline "Sonarcloud" is executed every weekend using a cron (timed) trigger +* Pipeline "integration-test" is executed whenever a commit happens in a Pull request on the code +* Pipeline "deploy-prod-k8s" is executed whenever a Docker image is pushed to the Docker registry + +This is just an example. You are free to create your own triggers that match your own internal process. +It is also possible to add multiple triggers for a pipeline so that it is executed for more than one type of events. + +If a pipeline has no defined trigger you can still start it manually. + +For all trigger types you can also use the [Codefresh CLI](https://codefresh-io.github.io/cli/triggers/) to manage them. + + + +## Creating a new trigger for a pipeline + +By default, when you create a new project from a Git provider, it will start with a Git trigger that runs on every commit. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/default-git-trigger.png" +url="/images/pipeline/triggers/default-git-trigger.png" +alt="Default GIT Trigger" +caption="Default GIT Trigger" +max-width="50%" +%} + +You can either delete this trigger, modify it, or add new ones. + +To add a new trigger, go to the *Triggers* tab in your pipeline editor and click the *Add Trigger* button. This will bring up the respective dialog where you are adding a new trigger. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +caption="Adding new Trigger dialog" +max-width="70%" +%} + +For more information see: + +* [Git triggers](git-triggers) +* [Dockerhub triggers](dockerhub-triggers) +* [Azure Registry triggers](azure-triggers) +* [Quay triggers](quay-triggers) +* [Helm triggers](helm-triggers) +* [Artifactory triggers](jfrog-triggers) +* [Cron trigger](cron-triggers) + +## Disabling triggers + +You can easily disable a trigger manually if you don't want it to be active anymore. +On the triggers tab, click the gear icon on the top right (*Open advanced options*). + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/enable-triggers.png" +url="/images/pipeline/triggers/enable-triggers.png" +alt="Toggle a trigger on/off" +caption="Toggle a trigger on/off" +max-width="70%" +%} + + +Then click the toggle switch on each trigger that you want to enable/disable. You can later enable the same trigger again +by clicking the same switch. + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/) +[Trigger a Kubernetes Deployment from a Dockerhub Push Event]({{site.baseurl}}/docs//yaml-examples/examples/trigger-a-k8s-deployment-from-docker-registry/) diff --git a/_docs/pipelines/triggers/azure-triggers.md b/_docs/pipelines/triggers/azure-triggers.md new file mode 100644 index 000000000..5356be27a --- /dev/null +++ b/_docs/pipelines/triggers/azure-triggers.md @@ -0,0 +1,88 @@ +--- +title: "Azure Registry trigger" +description: "Trigger Codefresh pipelines from Azure Registry events" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/pipeline-triggers/configure-azure-trigger/ +toc: true +--- + +Define and manage Azure Registry triggers for pipelines with the Codefresh UI. + +This allows you to trigger Codefresh pipelines when an Azure Registry event happens (e.g. a new Docker image is uploaded). + +## Manage Azure triggers with Codefresh UI + + +The process involves two parts: + +1. Creating a trigger in Codefresh. This will result in a special Codefresh webhook URL. +1. Creating a new notification in the Azure Registry that will use this URL to call Codefresh. + +> Make sure that you have an Azure cloud account and have already [created a registry](https://docs.microsoft.com/en-us/azure/container-registry/). + + +### Create a new Azure trigger + +To add a new Azure trigger, navigate to a Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="40%" +%} + +Fill the following information: + +* Registry Provider - select `Azure`. +* *Name of Registry* - put Azure name of registry (without `.azurecr.io`). +* *Image Repository Name* - Azure image repository name. +* *Action* - select `Push Image` action. +* *Tags* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/azure/add-trigger-dialog.png" +url="/images/pipeline/triggers/azure/add-trigger-dialog.png" +alt="Azure Registry settings" +max-width="50%" +%} + +Click next and a new dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/azure/view-trigger-dialog.png" +url="/images/pipeline/triggers/azure/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set Azure to call this URL when an event takes place. + +### Set up Azure notification + +The easiest way to create an Azure trigger is with the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/acr/webhook?view=azure-cli-latest#az-acr-webhook-create) (Also available in the Azure portal) + +Here is the command: + +{% highlight shell %} +{% raw %} +az acr webhook create -n MyWebhook -r kostisregistry --uri "https://g.codefresh.io/nomios/azure?account=409f15bdd444&secret=7zyg5Zhb8xYBn4ms" --actions push delete +{% endraw %} +{% endhighlight %} + +The name can be anything you want. The URI is the Codefresh URL that was created in the previous step. + + +### Triggering a Codefresh pipeline with Azure push + +Now, every time you push a new Docker image to the selected Azure Docker repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Azure Push trigger event. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/pipelines/triggers/cron-triggers.md b/_docs/pipelines/triggers/cron-triggers.md new file mode 100644 index 000000000..938382533 --- /dev/null +++ b/_docs/pipelines/triggers/cron-triggers.md @@ -0,0 +1,104 @@ +--- +title: "Cron Trigger" +description: "Run pipelines with a time schedule" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/configure-cron-trigger/ + - /docs/pipeline-triggers/configure-cron-trigger/ +toc: true +--- + +Cron triggers allow you to create pipelines that start on a specific time schedule. This is very useful for cleanup jobs or periodic checks or any other workflow that needs to run after a time interval. + +>All times mentioned in Cron triggers use the UTC time zone. + +## Manage Cron Triggers with Codefresh UI + +It is possible to define and manage Cron-based pipeline triggers with Codefresh UI. + +### Create a new Cron Trigger + +To add a new Cron trigger, navigate to Codefresh Pipeline *Configuration* view and expand *Triggers* section. Press the `Add Trigger` button and select a `Cron` trigger type to add. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + + +Visit [this page](https://github.com/codefresh-io/cronus/blob/master/docs/expression.md) to learn about supported `cron` expression format and aliases. + + +Fill the following information: + +* Use Cron helper wizard to build a valid `cron` expression or write custom `cron` expression on the last tab. +* Add a free text message, that will be sent as an additional event payload every time `cron` is executed. + +{% include image.html +lightbox="true" +file="/images/cron_trigger.png" +url="/images/cron_trigger.png" +alt="Add Cron Trigger" +max-width="70%" +%} + + +### Trigger Codefresh pipeline with cron timer + +Now, `cron` will trigger a recurrent pipeline execution based on the defined `cron expression`. + +## Manage Cron Triggers with Codefresh CLI + +It is also possible to use the Codefresh Command Line client (`CLI`) to manage Cron based pipeline triggers. + +### Cron trigger + +It is possible to trigger a Codefresh CD pipeline(s) periodically, using `cron` expression. + +You can use [Codefresh CLI](https://cli.codefresh.io/) to setup a Codefresh `cron` trigger. + +#### Create Cron trigger-event + +First, you need to create a new `cron` `trigger-event` to define a recurrent event. + +```sh +# create DockerHub recurrent event 'once in 20 minutes' +codefresh create trigger-event --type cron --kind codefresh --value expression="0 */20 * * * *" --value message="hello-once-in-20-min" + +# on success trigger-event UID will be printed out +Trigger event: "cron:codefresh:codefresh:0 */20 * * * *:hello-once-in-20-min:107e9db97062" was successfully created. +``` + +When creating a `cron trigger-event`, it is possible to specify a short text message, that will be passed to linked pipelines, every time the specified `cron` timer is triggered. + +Visit [this page](https://github.com/codefresh-io/cronus/blob/master/docs/expression.md) to learn about the supported `cron` expression format and aliases. + +#### Set up pipeline trigger + +Now, lets create a new pipeline trigger, linking previously defined `cron` `trigger-event` to one or more Codefresh pipelines. 
+ +```sh +# create trigger, linking trigger-event UID to the pipeline UID +codefresh create trigger "cron:codefresh:codefresh:0 */20 * * * *:hello-once-in-20-min:107e9db97062" 7a5622e4b1ad5ba0018a3c9c + +# create another trigger, linking the same trigger-event to another pipeline +codefresh create trigger "cron:codefresh:codefresh:0 */20 * * * *:hello-once-in-20-min:107e9db97062" 4a5634e4b2cd6baf021a3c0a +``` + +From now on, every 20 minutes Codefresh will trigger a pipeline execution for 2 pipelines linked to the previously specified `cron` `trigger-event` (once in 20 minutes) + +#### Cron Event payload + +The following variables will be available for any Codefresh pipeline linked to a `cron` `trigger-event`: + +- `EVENT_MESSAGE` - free text message (specified during creation) +- `EVENT_TIMESTAMP` - event timestamp in RFC 3339 format + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) + diff --git a/_docs/pipelines/triggers/dockerhub-triggers.md b/_docs/pipelines/triggers/dockerhub-triggers.md new file mode 100644 index 000000000..1268e4d80 --- /dev/null +++ b/_docs/pipelines/triggers/dockerhub-triggers.md @@ -0,0 +1,152 @@ +--- +title: "DockerHub triggers" +description: "" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/configure-dockerhub-trigger/ + - /docs/pipeline-triggers/configure-dockerhub-trigger/ +toc: true +--- + + +You can define and manage DockerHub triggers in Codefresh. + +### Create a new DockerHub trigger in Codefresh UI + +To add a new DockerHub trigger, navigate to Codefresh Pipeline *Configuration* view and expand *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + +Fill the following information: + +* *Registry Provider* - select `DockerHub`. +* *User/Organization Name* - put DockerHub user name or organization name here. +* *Image Repository Name* - DockerHub image repository name. +* *Action* - select `Push Image` action. +* *Tag* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/dockerhub/dockerhub_trigger_1.png" +url="/images/pipeline/triggers/dockerhub/dockerhub_trigger_1.png" +alt="Add Registry Trigger" +max-width="70%" +%} + +### Setup DockerHub Webhook + +Currently Codefresh does not support automatically setting up a DockerHub webhook. You need to do this manually. Press the *Next* button and see detailed instructions with URL links and secrets of how-to setup a DockerHub Webhook. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/dockerhub/dockerhub_trigger_2.png" +url="/images/pipeline/triggers/dockerhub/dockerhub_trigger_2.png" +alt="Add Webhook" +max-width="70%" +%} + +1. Copy `Endpoint` URL +1. Visit DockerHub image settings page following link in help +1. 
Add a new DockerHub Webhook with previously copied `Endpoint` URL + +### Triggering Codefresh pipeline with DockerHub push + +Now, every time you push a new Docker image to selected DockerHub repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with this DockerHub Push trigger event. + +## Manage DockerHub triggers with Codefresh CLI + +It is possible to use `codefresh` command line client (`CLI`) to manage DockerHub pipeline triggers. + +### Docker Hub Trigger + +It is possible to trigger Codefresh CD pipeline(s) when a new Docker image pushed into DockerHub. + +You can use [Codefresh CLI](https://cli.codefresh.io/) to setup a Codefresh trigger for DockerHub. + +#### Create DockerHub trigger-event + +First, create a `trigger-event` for every DockerHub image, you would like to setup a Codefresh trigger. + +``` +# create DockerHub trigger event for codefresh/fortune +codefresh create trigger-event --type registry --kind dockerhub --value namespace=codefresh --value name=fortune --value action=push + +# on success trigger-event UID will be printed out +Trigger event: registry:dockerhub:codefresh:fortune:push:107e9db97062 was successfully created. +``` + +#### Set up DockerHub webhook + +Currently, an additional manual action is required to bind DockerHub `push` image event to the Codefresh `trigger-event`. + +``` +# get trigger-event details for previously created trigger-event +codefresh get trigger-event -o yaml registry:dockerhub:codefresh:fortune:push:107e9db97062 +``` + +... command output: + +```yaml +uri: 'registry:dockerhub:codefresh:fortune:push:107e9db97062' +type: registry +kind: dockerhub +public: false +secret: aGao5weuez2G6WF9 +status: active +endpoint: >- + https://g.codefresh.io/nomios/dockerhub?account=107e9db97062&secret=aGao5weuez2G6WF9 +description: Docker Hub codefresh/fortune push event +help: >- + Docker Hub webhooks fire when an image is built in, pushed or a new tag is + added to, your repository. + + + Configure Docker Hub webhooks on + https://hub.docker.com/r/codefresh/fortune/~/settings/webhooks/ + + + Add following Codefresh Docker Hub webhook endpoint + https://g.codefresh.io/nomios/dockerhub?account=107e9db97062&secret=aGao5weuez2G6WF9 +``` + +1. Copy `endpoint` URL +1. Visit DockerHub settings page [https://hub.docker.com/r/codefresh/fortune/~/settings/webhooks/](https://hub.docker.com/r/codefresh/fortune/~/settings/webhooks/). +1. Add a new Webhook with previously copied `endpoint` URL. + + +#### Set up pipeline trigger + +Now, lets set up a new pipeline trigger, linking previously defined DockerHub push `codefresh/fortune` `trigger-event` to one or more Codefresh pipelines. + +``` +# create trigger, linking trigger-event UID to the pipeline UID +codefresh create trigger "registry:dockerhub:codefresh:fortune:push:107e9db97062" 7a5622e4b1ad5ba0018a3c9c + +# create another trigger, linking the same trigger-event to another pipeline +codefresh create trigger "registry:dockerhub:codefresh:fortune:push:107e9db97062" 4a5634e4b2cd6baf021a3c0a +``` + +From now on, Codefresh will trigger pipeline execution when new `codefresh/fortune` image is pushed to the DockerHub. + +#### DockerHub Event payload + +The following variables will be available for any Codefresh pipeline linked to a DockerHub `trigger-event`: + +- `EVENT_NAMESPACE` - DockerHub namespace (alias `organization`). +- `EVENT_NAME` - DockerHub image name (alias `repository`). +- `EVENT_TAG` - Docker image tag. 
+- `EVENT_PUSHER` - user who pushed this Docker image. +- `EVENT_PUSHED_AT` - timestamp for push event. +- `EVENT_PAYLOAD` - original DockerHub Webhook JSON payload. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) diff --git a/_docs/pipelines/triggers/git-triggers.md b/_docs/pipelines/triggers/git-triggers.md new file mode 100644 index 000000000..de0a551ee --- /dev/null +++ b/_docs/pipelines/triggers/git-triggers.md @@ -0,0 +1,371 @@ +--- +title: "Git triggers" +description: "Learn how to run pipelines from Git events" +group: pipelines +sub_group: triggers +toc: true +--- + +Git triggers are the most basic of the trigger typesfor performing [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) with Codefresh. + +At the trigger level, you can select: + +* Which code repository will be used as a trigger +* Which branches will be affected by a pipeline +* If a trigger will apply to a Pull Request (PR) or not + +> You can select a repository other than the one the project itself belongs to. It is possible + to trigger a build on project A even though a commit happened on project B. + +You can also use [conditional expressions]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) at the pipeline level to further fine-tune the way specific steps (or other transitive pipelines) are executed. + +## Manage GIT triggers with Codefresh UI + +To add a new GIT trigger, navigate to the Codefresh Pipeline *Configuration* view and expand the *Triggers* section on the right side. Press the *Add Trigger* button and select a *GIT* trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + +## General trigger Settings + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-git-trigger.png" +url="/images/pipeline/triggers/add-git-trigger.png" +alt="Adding GIT Trigger" +max-width="50%" +%} + +The Git trigger is comprised of the following settings: + +* *Trigger Name* - a freetext trigger name (required). +* *Description* - a freetext description (optional). +* *Repository* - you can select any repository even something different than the one that is used for the code checkout. +* *Commit Checkbox* - if enabled will trigger this pipeline for any commit. +* *PR Checkboxes* - various checkboxes for filtering the Pull request event. + +The commit checkbox (by default it is enabled) means that this pipeline will run for *any* commit as long as its source branch matches the naming scheme. This includes commits on pull requests. + +The PR checkboxes mean that this pipeline will run only on the respective events that happen on a Pull Request. You can select multiple checkboxes to further fine-tune the exact event. If you are interested in all events, select the checkbox *Any Pull Request event*. + +>The individual Pull request checkboxes are available only for GitHub repositories. + +## Configure Filter Settings + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/configure-filter-settings.png" +url="/images/pipeline/triggers/configure-filter-settings.png" +alt="Configure Filter Settings" +max-width="50%" +%} + +* *Support pull request events from forks* - toggle that is useful for open source projects. 
+* *Branch Field* - this is a regular expression and will only trigger for branches that match this naming pattern. +* *PR Comment (restricted) and PR Comment Fields* - useful for open source projects. +* *Pull Request Target* branch - this is a regular expression and will trigger only when a Pull request is created against any branch that matches it. +* *Modified Files* - allows you to constrain the build and trigger it only if the modified files from the commit match this [glob expression](https://en.wikipedia.org/wiki/Glob_(programming)). + +### Pull Request Target Branch and Branch + +The Pull Request Target Branch field allows you to trigger this pipeline only when the target of a Pull Request (i.e. where the pr is going to be merged at) matches the +branch name regular expression. Common examples for branch names would be `master` or `production`. + +This field has only meaning when a commit happens in the context of a pull request and in that case: + +1. The Branch field will look at the branch that the commit is happening on +1. The PR Target Branch field will look at the branch the PR is happening against + +For example, if you create a commit on a branch that is named `my-feature` which is currently part of PR against branch `staging` (i.e. somebody wants to merge `my-feature` **TO** `staging`) then: + +1. The `BRANCH` field value will try to match against `my-feature` +1. the `PULL REQUEST TARGET BRANCH` will try to match against `staging` + +Here are some more syntax examples: + +* `/^((qa-release)$).*/g` - only run if branch is named `qa-release`. +* `/^((production)$).*/g` - only run if branch is named `production`. +* `/release/g` - only run if branch name contains `release` as substring. +* `/feature-/gi` - only run if branch is `feature-foo`, `feature-bar`, `my-feature-123` etc. +* `/^((?!^feature).)*$/gi` - only run if branch name does **not** start with `feature`. + +>The field *Pull Request Target* is available for all Git providers apart from Atlassian stash. +> +>When using the Terraform Provider, please use the [Go regex syntax](https://github.com/google/re2/wiki/Syntax) as some perl regex syntax is not compatible. + +The concept behind these checkboxes and branch name fields is to allow you to define which pipelines run for various workflows in your organization. + +As a simple example you can have a *production* pipeline that runs only on *master* branch (and therefore the branch field says "master") and a *testing* pipeline that runs user acceptance tests where only the Pull Request Open checkbox is active. This means that User Acceptance tests will run whenever a PR is created. Once it is merged the *production* pipeline will deploy the changes. + +In a more advanced example, you could add regular expressions in the branch field with names such as *feature-*, *hotfix-* etc. and the PR checkbox active on different pipelines. This way you could trigger the pull requests only when they happen on specific branches. So, a developer that creates a temporary feature with a name that doesn't match these naming patterns will not trigger those pipelines. + +Notice also that you can use Negative Lookahead in your Branch (Regex Expression) filter. An example to exclude tag events: `/^((?!tag)).*/gi` (the pattern here for tags to exclude is that they begin with `tag…`). + +This will make all push-events (including tags) that do follow the `tag...` pattern to be excluded. +Therefore, all tags like `tag1`, `tag-X` **won't** trigger the pipeline. 
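
If you want to sanity-check a branch expression before saving the trigger, you can evaluate it locally. The snippet below is only an illustrative sketch: it assumes Node.js is available on your workstation (the patterns above are written in JavaScript-style `/pattern/flags` notation), and the branch names are made-up examples.

```
# Hypothetical branch names, tested against the negative-lookahead example shown above
node -e 'console.log(/^((?!^feature).)*$/gi.test("feature-login"))'   # false - trigger is skipped
node -e 'console.log(/^((?!^feature).)*$/gi.test("hotfix-login"))'    # true  - trigger fires
```
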
+ +### Pull Requests from comments + +Pull Requests from comments are supported for all Git providers, for both private and public repositories. +There are two options: +* **Pull request comment added (restricted)** + This option triggers an event only when the PR comments are made by repository owners or collaborators. +* **Pull request comment added** + This option triggers an event when PR comments are made by any user, regardless of their permissions. + Because it is not restricted to owners and collaborators, this option is useful in GitHub, to enable triggers for PR comments made by users in GitHub teams. + + > We strongly recommend selecting this option only for _private repositories_. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/pr-comment-trigger-options.png" +url="/images/pipeline/triggers/pr-comment-trigger-options.png" +alt="Trigger options for PR comments" +caption="Trigger options for PR comments" +max-width="50%" +%} + + +### Support for building pull requests from forks + +By default, the Git trigger works for events coming from your personal repository. You can also use triggers from events that are coming from forks. This is a very useful feature for open source projects, as it allows you to run your own unit tests and other checks against a new feature *before* actually merging it in your repo. + +To enable this behavior: + +* Toggle the *support pull request events from forks* switch +* Select *Pull request comment added (restricted)* +* In the *pr comment* field enter a custom string (accepts regex) + +Then once a contributor creates a fork of your repository and submits a pull request, you can review the code and then add a comment on your own that matches the PR comment expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/pr-from-fork.png" +url="/images/pipeline/triggers/pr-from-fork.png" +alt="Triggering a public build from a comment" +caption="Triggering a public build from a comment" +max-width="50%" +%} + +Once that is done, Codefresh will launch your pipeline against the Pull Request. If you manage an open source project with Codefresh, remember to enable [public builds]({{site.baseurl}}/docs/configure-ci-cd-pipeline/build-status/#public-build-logs) as well. + +When supporting building of pull requests from forks there are a few "gotchas" to look out for: + +* Only comments made by repository owners and [collaborators](https://help.github.com/en/github/setting-up-and-managing-organizations-and-teams/adding-outside-collaborators-to-repositories-in-your-organization) will result in the pipeline being triggered. +* Only Git pushes by collaborators within the GitHub organization will result in the pipeline being triggered +* If the repository is in a GitHub organization, comments made by private members of the organization will not activate the trigger, even if they are set as an owner or collaborator. Private members means that they need to be explicitly added to the repository. +Access cannot be "inherited" by the GitHub team. Currently, only comments from Admins, or Collaborators (directly added, not via teams) are allowed, in order to be caught by this filter. +* The *Pull request comment added* checkbox should likely be the only one checked, or your pipeline may trigger on other events that you don't anticipate. 
+ + + +### Monorepo support (Modified files) + +The *modified files* field is a very powerful Codefresh feature that allows you to trigger a build only if the +files affected by a commit are in a specific folder (or match a specific naming pattern). This means that +you can have a big GIT repository with multiple projects and build only the parts that actually change. + +>Currently the field *modified files* is available only for GitHub, GitLab, Azure DevOps and [Bitbucket Server and Data Center](https://confluence.atlassian.com/bitbucketserver/add-a-post-service-webhook-776640367.html) repositories, since they are the only GIT providers +that send this information in the webhook. We will support other GIT providers as soon as they add the respective feature. + +### Using the Modified files field to constrain triggers to specific folder/files + +The *modified files* field accepts glob expressions. The paths are relative to the root folder of the project (where the git repository was checked out). Some possible examples are: + +``` +**/package.json +**/Dockerfile* +my-subproject/** +my-subproject/sub-subproject/package.json +my-subproject/**/pom.xml +!config/** + +``` + +>You can also use relative paths with dot-slash. Therefore `./package.json` and `package.json` are exactly the same thing. They both refer to the file `package.json` found at the root of the git project that was checked out as part of the build. + +You can also define [multiple expressions](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm) like this (but notice that there is a limit of 150 characters for the field): + +``` +{app/**,test/**} +{**/package.json,my-subproject/**} +!{deployment/**,**/version.cfg} +``` + +Once a commit happens to a code repository, Codefresh will see which files are changed from the git provider and trigger the build **only** if the changed files match the glob expression. If there is no match no build will be triggered. + +> Notice that the `{}` characters are only needed if you have more than one expression. Do not use them if you have a single glob expression in the field. + +This is a very useful feature for organizations who have chosen to have multiple projects on the same GIT repository (monorepos). Let's assume for example that a single system has a Java backend, a NestJS frontend and a Ruby-on-Rails internal dashboard. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/monorepo.png" +url="/images/pipeline/triggers/monorepo.png" +alt="GIT monorepo" +max-width="60%" +%} + +Now we can define 3 different pipelines in Codefresh where each one builds the respective project + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/monorepo-pipelines.png" +url="/images/pipeline/triggers/monorepo-pipelines.png" +alt="GIT monorepo pipelines" +max-width="70%" +%} + +And then in the GIT trigger for each one we set the modified files field to the following values: + +* For the *build-nestjs-only* pipeline *MODIFIED FILES* has `my-nestjs-project/**`. +* For the *build-java-only* pipeline *MODIFIED FILES* has `my-java-project/**`. +* For the *build-rails-only* pipeline *MODIFIED FILES* has `my-rails-project/**`. + +This way as multiple developers work on the git repository only the affected projects will actually build. A change to the NestJS project will *not* build the Rails project as well. 
Also, if somebody changes *only* the README file and nothing else, no build will be triggered at all (which is a good thing as the source code is exactly the same). + +You can also use Glob expressions for files. For example: + +* An expression such as `my-subproject/sub-subproject/package.json` will trigger a build **only** if the dependencies of this specific project are changed +* A pipeline with the expression `my-subproject/**/pom.xml` will trigger only if the Java dependencies for any project that belongs to `my-subproject` actually change +* An expression such as `!config/manifest.yaml` will trigger a build if any file was changed *apart from* `config/manifest.yaml` + +Glob expressions have many more options not shown here. Visit the [official documentation](https://en.wikipedia.org/wiki/Glob_(programming)) to learn more. You can also use the [Glob Tester web application](https://www.digitalocean.com/community/tools/glob) to test your glob expressions beforehand so that you are certain they match the files you expect them to match. + +## Advanced Options + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/advanced-options.png" +url="/images/pipeline/triggers/advanced-options.png" +alt="Advanced Options" +max-width="60%" +%} + +* *Commit Status Title* - the commit status title pushed to the GIT version control system. By default, is the pipeline name, but you can override the name on GIT trigger. +* *Build Variables* - import a [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) or manually add variables +* *More Options* + * *Ignore Docker engine cache for build* - selecting this option may slow down your build. See #1 [here]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) + * *Ignore Codefresh cache optimizations for build* - selecting this option may slow down your build. See #2 [here]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) + * *Reset pipeline volume* - useful for troubleshooting a build that hangs on the first step. See [here]({{site.baseurl}}/docs/troubleshooting/common-issues/restoring-data-from-pre-existing-image-hangs-on/) + * *Report notification on pipeline execution* - Decide if [Slack notifications]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) will be sent (as well as status updates back to your Git provider) +* *Runtime Environment* - choose to use pipeline [settings]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-settings) or override them + +### Set minimum disk space for build volume by trigger +Set the disk space you need for the build volume in the context of the selected trigger. Setting the disk space for the trigger overrides that set for the pipeline. + +1. In **Workflow > Triggers**, expand **Advanced Options**. +1. From the Runtime Environment list, select **Override pipeline settings**, and then select the runtime for which to override the pipeline setting. +1. If required, change the resource size. +1. Enable **Set minimum disk space**, and then change as required. + +## Manually adding the trigger to GitHub + +When creating a Git trigger in codefresh, sometimes the Git Integration does not have the permissions to create a webhook on the designated repository. When this happens, you get the following error: `Failed to add Trigger`. + +This error means that Codefresh could not create the webhook and verify that it works. 
With that, Codefresh will mark the Trigger as Unverified. Two additional fields (Endpoint and Secret) will appear under the "Verify Trigger" button when you get this error. + +- **Endpoint**: This will be the Webhook URL for the created Trigger +- **Secret**: Token to add to Github for verification. + +### Adding Webhook to Github + +1. When you receive the `Failed to add Trigger`, log into GitHub. + - Make sure this user can access the repository settings and create Webhooks +1. Go to the repository mentioned in the "REPOSITORY" section from Unverified Trigger. +1. Go to Settings > Webhooks and click the "Add webhook" button. +1. Fill in the form + - **Payload URL**: The URL from the Endpoint field from the Trigger + - **Content type**: application/json + - **Secret**: The token in the Secret field from the Trigger + - **SSL verification**: Enable SSL verification + - **Events**: + 1. Select let me select individual events + 2. Match the items selected in the Trigger By field from the Trigger + - **Active**: Make sure this is selected +1. Click "Add webhook" when done. +1. Click "Done" in the Add Trigger form. +1. Test your webhook by making an event in the repository that will cause the Trigger to start the build. + +> **Note**: + * You will be responsible for syncing the Trigger By to the Events sent to us for the webhook. You can select "Send me everything" if you do not want to manually match the Trigger By in the Trigger with the Webhook Events in GitHub. + * The Trigger will remain "Unverified" until the integration has the correct permissions to the repository. + +## Accessing webhook content of the trigger directly + +If your Git trigger is coming from Github, you can also access the whole payload of the webhook that was responsible for the trigger. +The webhook content is available at `/codefresh/volume/event.json`. You can read this file in any pipeline step and process it like any other json file (e.g. with the jq utility). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + read_trigger_webook: + title: "Reading Github webhook content" + type: "freestyle" + image: "alpine:3.9" + commands: + - 'cat /codefresh/volume/event.json' +{% endraw %} +{% endhighlight %} + +Notice however that this file is only available when the pipeline was triggered from a GitHub event. If you manually run the pipeline, the file is not present. + +## Using YAML and the Codefresh CLI to filter specific Webhook events + +The default GUI options exposed by Codefresh are just a starting point for GIT triggers and pull requests. Using [Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) and the [Codefresh CLI plugin](https://codefresh-io.github.io/cli/) you can further create two-phase pipelines where the first one decides +which webhook events will be honored and the second one contains the actual build. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/two-phase-pipeline.png" +url="/images/pipeline/triggers/two-phase-pipeline.png" +alt="Two phase pipeline" +max-width="80%" +%} + +The generic GIT trigger is placed on Pipeline A. This pipeline then filters the applicable webhooks using [conditional expressions]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/). Then it uses the Codefresh CLI plugin (and specifically the [run pipeline capability](https://codefresh-io.github.io/cli/pipelines/run-pipeline/)) to trigger pipeline B that performs build. 
+ +Some of the YAML variables that you might find useful (from the [full list]({{site.baseurl}}/docs/codefresh-yaml/variables/)): + +* `CF_PULL_REQUEST_ACTION` - open, close, synchronize, assign etc. +* `CF_PULL_REQUEST_TARGET` - target branch of the pull request. +* `CF_BRANCH` - the branch that contains the pull request. + +As an example, here is the `codefresh.yml` file of pipeline A where we want to run pipeline B only when a Pull Requested is opened against a branch named *production*. + +`codefresh.yml` of pipeline A +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + triggerstep: + title: trigger + image: codefresh/cli + commands: + - 'codefresh run -b=${{CF_BRANCH}}' -t + when: + condition: + all: + validateTargetBranch: '"${{CF_PULL_REQUEST_TARGET}}" == "production"' + validatePRAction: '''${{CF_PULL_REQUEST_ACTION}}'' == ''opened''' +{% endraw %} +{% endhighlight %} + +This is the build definition for the first pipeline that has a GIT trigger (with the Pull request checkbox enabled). +It has only a single step which uses conditionals that check the name of the branch where the pull request is targeted to, as well as the pull request action. Only if *both* of these conditions are true then the build step is executed. + +The build step calls the second pipeline. The end result is that pipeline B runs only when the Pull Request is opened the first time. Any further commits on the pull request branch will **not** trigger pipeline B (pipeline A will still run but the conditionals will fail). + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Cron triggers]({{site.baseurl}}/docs/pipelines/triggers/cron-triggers/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Multi-git trigger]({{site.baseurl}}/docs/troubleshooting/common-issues/multi-git-triggers/) diff --git a/_docs/pipelines/triggers/helm-triggers.md b/_docs/pipelines/triggers/helm-triggers.md new file mode 100644 index 000000000..98ede0e9b --- /dev/null +++ b/_docs/pipelines/triggers/helm-triggers.md @@ -0,0 +1,61 @@ +--- +title: "Helm Trigger" +description: "" +group: configure-ci-cd-pipeline +sub_group: triggers +toc: true +--- + +Codefresh has the option to create pipelines that respond to Helm events. For instance, one pipeline can be set-up to create a Docker image and chart. Once those are created, another pipeline is triggered to implement the actual deployment. + +Define and manage Helm pipeline triggers with the Codefresh UI. + +## Create a new Helm Trigger + +To add a new Helm trigger, navigate to Codefresh Pipeline *Configuration* view and expand *Triggers* section. Press the `Add Trigger` button and select the `Helm` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + +Fill the following information: +* *Helm Provider* - select `JFrog Artifactory`. +* *Repository* - put JFrog name of the Artifactory repository. +* *Chart Name* - put name of the chart in the Artifactory repository. +* *Action* - select `Push Chart` action. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/configure-artifactory.png" +url="/images/pipeline/triggers/jfrog/configure-artifactory.png" +alt="Helm Artifactory settings" +max-width="50%" +%} + +Click next and a new Dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. 
+ + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +url="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set JFrog Artifactory to call this URL when an event takes place. This can either be done through the [JFrog Artifactory webhook plugin]({{site.baseurl}}/docs/pipelines/triggers/jfrog-triggers/) or through [setting up Webhooks](https://www.jfrog.com/confluence/display/JFROG/Webhooks) in the UI. + +## Trigger Codefresh pipeline with an Artifactory push + +Now, every time you push a Helm chart to the selected Artifactory repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Artifactory Push trigger event. + + +## Related articles +[Helm Releases management](https://codefresh.io/docs/docs/new-helm/helm-releases-management/) +[Custom Helm uploads](https://codefresh.io/docs/docs/new-helm/custom-helm-uploads/) +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) diff --git a/_docs/pipelines/triggers/jfrog-triggers.md b/_docs/pipelines/triggers/jfrog-triggers.md new file mode 100644 index 000000000..97471f4df --- /dev/null +++ b/_docs/pipelines/triggers/jfrog-triggers.md @@ -0,0 +1,101 @@ +--- +title: "Artifactory trigger" +description: "Trigger Codefresh pipelines from Artifactory" +group: configure-ci-cd-pipeline +sub_group: triggers +redirect_from: + - /docs/pipeline-triggers/configure-jfrog-trigger/ +toc: true +--- + +Define and manage Artifactory pipeline triggers with the Codefresh UI. +This allows you to trigger Codefresh pipelines when an Artifactory event occurs (i.e. a new Docker image is uploaded). + +## Manage Artifactory Triggers with Codefresh UI + + +The process involves two parts: + +1. Creating a trigger in Codefresh. This will result in a special Codefresh webhook URL +1. Activating the [webhook plugin](https://github.com/jfrog/artifactory-user-plugins/tree/master/webhook) in Artifactory and setting it up to call the Codefresh URL + +> Make sure that you have admin access to your Artifactory instance in order to setup its webhook plugin. + +### Create a new Artifactory trigger + +To add a new Artifactory trigger, navigate to a Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="40%" +%} + +Fill the following information: + +* *Registry Provider* - select `JFrog`. +* *Repository Name* - put JFrog name of repository. +* *Docker Image Name* - put name of Docker image. +* *Action* - select `Push Image` action. +* *Tag* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/configure-trigger.png" +url="/images/pipeline/triggers/jfrog/configure-trigger.png" +alt="Artifactory Registry settings" +max-width="50%" +%} + +Click next and a new Dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. 
+ + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +url="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set JFrog Artifactory to call this URL when an event takes place. + +### Set up JFrog Artifactory webhook plugin + +The [webhook functionality](https://github.com/jfrog/artifactory-user-plugins/tree/master/webhook) in JFrog artifactory comes in plugin. +You can read [detailed documentation](https://www.jfrog.com/confluence/display/RTF/User+Plugins) for JFrog plugins but in summary: + +* The file `webhook.groovy` needs to be copied to `ARTIFACTORY_HOME/etc/plugins` (the plugin itself) +* A file `webhook.config.json` should also be placed in the same folder (the plugin setup) + +Here is an example for Codefresh. + +`webhook.config.json` +{% highlight json %} +{% raw %} +{ + "webhooks": { + "mywebhook": { + "url": "https://g.codefresh.io/nomios/jfrog?account=2dfdf89f235bfe&sefgt=EvQf9bBS55UPekCu", + "events": [ + "docker.tagCreated" + ] + } + }, + "debug": false, + "timeout": 15000 +} +{% endraw %} +{% endhighlight %} + + + +### Trigger a Codefresh pipeline with an Artifactory push + +Now, every time you push/tag a Docker image to the selected Artifactory repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Artifactory Push trigger event. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/pipelines/triggers/quay-triggers.md b/_docs/pipelines/triggers/quay-triggers.md new file mode 100644 index 000000000..1e7e275f6 --- /dev/null +++ b/_docs/pipelines/triggers/quay-triggers.md @@ -0,0 +1,102 @@ +--- +title: "Quay Trigger" +description: "Trigger Codefresh pipelines from Quay" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/pipeline-triggers/configure-quay-trigger/ +toc: true +--- + +Define and manage Quay triggers for pipelines with the Codefresh UI. +This allows you to trigger Codefresh pipelines when a Quay event happens (e.g. a new Docker image is uploaded). + +## Manage Quay triggers with Codefresh UI + + +The process involves two parts: + +1. Creating a trigger in Codefresh (this will result in a special Codefresh webhook URL) +1. Creating a new notification in Quay that will use this URL to call Codefresh + +> Make sure that you have a Quay account and have already [created a repository](https://docs.quay.io/guides/create-repo.html) (or pushed a Docker image at least once). + + +### Create a new Quay Trigger + +To add a new Quay trigger, navigate to a Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="40%" +%} + +Fill the following information: + +* *Registry Provider* - select `Quay`. +* *User/Organization Name* - put Quay username or organization name here. +* *Image Repository Name* - Quay image repository name. +* *Action* - select `Push Image` action. +* *Tag* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/add-trigger-dialog.png" +url="/images/pipeline/triggers/quay/add-trigger-dialog.png" +alt="Quay Registry settings" +max-width="50%" +%} + +Click next and a new Dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/view-trigger-dialog.png" +url="/images/pipeline/triggers/quay/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set Quay to call this URL when an event takes place. + +### Set up Quay notification + +Log in your Quay account and go to the respective repository. You can also click the link shown in the Codefresh dialog to go directly to the settings of that repository. + +Scroll down and under *Events and Notifications* click *Create Notification*. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/add-quay-notification.png" +url="/images/pipeline/triggers/quay/add-quay-notification.png" +alt="Add Quay Notification" +max-width="50%" +%} + +In the new screen select *Push to repository* from the drop-down or any other event that you wish the Codefresh pipeline to trigger. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/edit-quay-notification.png" +url="/images/pipeline/triggers/quay/edit-quay-notification.png" +alt="Edit Quay Notification" +max-width="50%" +%} + +From the next dropdown choose *Webhook Post*. In the *Webhook URL entry* paste the Codefresh URL that was created in the Codefresh Trigger dialog. + +Finally click *Create Notification*. + + +### Triggering a Codefresh pipeline with Quay push + +Now, every time you push a new Docker image to the selected Quay repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Quay Push trigger event. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/pipelines/using-secrets.md b/_docs/pipelines/using-secrets.md deleted file mode 100644 index 582040570..000000000 --- a/_docs/pipelines/using-secrets.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Using secrets" -description: "" -group: pipelines -toc: true ---- - -Coming soon diff --git a/_docs/pipelines/variables.md b/_docs/pipelines/variables.md new file mode 100644 index 000000000..7ec425542 --- /dev/null +++ b/_docs/pipelines/variables.md @@ -0,0 +1,338 @@ +--- +title: "Variables in pipelines" +description: "" +group: pipelines +redirect_from: + - /docs/variables/ +toc: true +--- +Codefresh provides a set of predefined variables automatically in each build, that you can use to parameterize the way your pipeline works. You can also define your own variables. Some common examples of predefined variables include: + +* `CF_BRANCH` is the Git branch that was used for this pipeline. +* `CF_REVISION` is the Git hash that was used for this pipeline. +* `CF_BUILD_URL` is the url of the pipeline build. + +## Using Codefresh variables in your pipelines + +There are two ways to use a Codefresh variable in your pipelines: + +1. By default all variables will be exposed as UNIX environment variables in all freestyle steps as `$MY_VARIABLE_EXAMPLE`. +1. Variables can be used in YAML properties with the syntax {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %}. 
+ +> If you are unsure about which form you need to use, feel free to use {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %} everywhere. This is the Codefresh specific form and should function in all sections of `codefresh.yml`. + +For example, you can print out the branch as an environment variable like this: + +`YAML` +{% highlight yaml %} +{% raw %} +MyOwnStep: + title: Variable example + image: alpine + commands: + - echo $CF_BUILD_ID + - echo $CF_BRANCH_TAG_NORMALIZED +{% endraw %} +{% endhighlight %} + +In the example above we are using simple `echo` commands, but any program or script that reads environment variables could also read them in the same manner. + +Using variables directly in yaml properties can be done like this: + +`YAML` +{% highlight yaml %} +{% raw %} +MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + tag: ${{CF_BRANCH_TAG_NORMALIZED}} +{% endraw %} +{% endhighlight %} + +You can also concatenate variables: + +`YAML` +{% highlight yaml %} +{% raw %} +MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + tag: ${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}} +{% endraw %} +{% endhighlight %} + +This will create docker images with tags such as: + +``` +master-df6a04c +develop-ba1cd68 +feature-vb145dh +``` + + + + +Notice that this syntax is specific to Codefresh and is **only** available within the Codefresh YAML file itself. If you want to write scripts or programs that use the Codefresh variables, you need to make them aware of the environment variable form. + + +## System variables + +System variables are automatically injected to any freestyle step as environment variables. + +> It is important to understand that all Git related variables such `CF_BRANCH`, `CF_COMMIT_MESSAGE`, `CF_REVISION` etc. are coming directly from the Git provider you use and have the same limitations of that provider. For example GitLab is sending less information in pull request events than normal pushes, and Bitbucket sends only the short hash of a commit in pull request events. We suggest you read the documentation of your Git provider first to understand what information is available for every Git event + +{: .table .table-bordered .table-hover} +| Variable | Description | +| ------------------------------------------------- | ------------------------------------------------------ | +| {% raw %}`${{CF_REPO_OWNER}} `{% endraw %} | Repository owner. | +| {% raw %}`${{CF_REPO_NAME}}`{% endraw %} | Repository name. | +| {% raw %}`${{CF_BRANCH}}`{% endraw %} | Branch name (or Tag depending on the payload json) of the Git repository of the main pipeline, at the time of execution.
You can also use {% raw %}`${{CF_BRANCH_TAG_NORMALIZED}}`{% endraw %} to get a normalized form of the branch name, with any characters that would be illegal in a Docker image tag removed. You can also use {% raw %}`${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}`{% endraw %} to force lowercase. | +| {% raw %}`${{CF_BASE_BRANCH}}`{% endraw %} | The base branch used during creation of a tag | +| {% raw %}`${{CF_PULL_REQUEST_ACTION}}`{% endraw %} | The pull request action. Values are those defined by your Git provider such as [GitHub](https://developer.github.com/webhooks/), [GitLab](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html), [Bitbucket](https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html) etc. | +| {% raw %}`${{CF_PULL_REQUEST_TARGET}}`{% endraw %} | The pull request target branch | +| {% raw %}`${{CF_PULL_REQUEST_NUMBER}}`{% endraw %} | The pull request number | +| {% raw %}`${{CF_PULL_REQUEST_ID}}`{% endraw %} | The pull request ID | +| {% raw %}`${{CF_PULL_REQUEST_LABELS}}`{% endraw %} | The labels of the pull request (GitHub and GitLab only) | +| {% raw %}`${{CF_COMMIT_AUTHOR}}`{% endraw %} | Commit author. | +| {% raw %}`${{CF_BUILD_INITIATOR}}`{% endraw %} | The person (username) that started the build. If the build was started by a Git webhook (e.g. from a pull request) it will hold the webhook user. Notice that if a build is restarted manually it will always hold the username of the person that restarted it. | +| {% raw %}`${{CF_ACCOUNT}}`{% endraw %} | Codefresh account for this build | +| {% raw %}`${{CF_COMMIT_URL}}`{% endraw %} | Commit URL. | +| {% raw %}`${{CF_COMMIT_MESSAGE}}`{% endraw %} | Commit message of the Git repository revision, at the time of execution.
            Quotes within the message are escaped (i.e. ' becomes \', " becomes \"). |
+| {% raw %}`${{CF_COMMIT_MESSAGE_ESCAPED}}`{% endraw %}    | Commit message of the Git repository revision, at the time of execution.
            Special characters are escaped. | +| {% raw %}`${{CF_REVISION}}`{% endraw %} | Revision of the Git repository of the main pipeline, at the time of execution.
            You can also use {% raw %}`${{CF_SHORT_REVISION}}`{% endraw %} to get the abbreviated 7-character revision hash, as used in Git. Note: use this variable as string with quotes to tag the image {% raw %}`${{CF_SHORT_REVISION}}`{% endraw %} | +| {% raw %}`${{CF_VOLUME_NAME}}`{% endraw %} | Refers to the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) between [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/). Normally you only need to define this in [compositions]({{site.baseurl}}/docs/pipelines/steps/composition/). In freestyle steps, it is automatically present without any extra configuration. | +| {% raw %}`${{CF_VOLUME_PATH}}`{% endraw %} | Refers to the mounted path of the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) inside a Freestyle container. In the current implementation it expands to `/codefresh/volume`. | +| {% raw %}`${{CF_BUILD_TRIGGER}}`{% endraw %} | Will be an indication of the current build was triggered: *build: The build was triggered from the build button* webhook: The build was triggered from a control version webhook | +| {% raw %}`${{CF_BUILD_ID}}`{% endraw %} | The build id. Note: use this variable as string with quotes to tag the image {% raw %}`${{CF_BUILD_ID}}`{% endraw %} | +| {% raw %}`${{CF_BUILD_TIMESTAMP}}`{% endraw %} | The timestamp the build was created. Note: use this variable as string with quotes to tag the image {% raw %}`${{CF_BUILD_TIMESTAMP}}`{% endraw %} | +| {% raw %}`${{CF_BUILD_URL}}`{% endraw %} | The URL to the build in Codefresh | +| {% raw %}`${{CF_PIPELINE_NAME}}`{% endraw %} | The full path of the pipeline, i.e. "project/pipeline" | +| {% raw %}`${{CF_STEP_NAME}}`{% endraw %} | the name of the step, i.e. "MyUnitTests" | +| {% raw %}`${{CF_URL}}`{% endraw %} | The URL of Codefresh system | +| {% raw %}`${{CI}}`{% endraw %} | The value is always `true` | +| {% raw %}`${{CF_KUBECONFIG_PATH}}`{% endraw %} | Path to injected kubeconfig if at least one Kubernetes cluster [is configured]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/). You can easily run [custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/) since it is automatically setup by Codefresh in all pipelines. | +| Any variable specified in the pipeline settings | For example, if you configure the pipeline settings with a variable named PORT, you can put the variable in your YAML build descriptor as {% raw %}`${{PORT}}`{% endraw %}. | + +## Context-related Variables +Context-related variables are created dynamically during the workflow execution and according to the used steps. + +{: .table .table-bordered .table-hover} +| Variable | Description | +| ------------------------------------------------- | ------------------------------------------------------ | +| **Working Directories** | For example, you can set the working directory of step `A` with a variable named after a previously executed step, step `B`. Therefore, setting step `A` with {% raw %}`working-directory:${{B}}`{% endraw %} means that step `A` executes in the same working directory as step `B`. | +| **Images** | You can set the candidate field of the push step with a variable named after a previously executed build step. Since the details of a created image are not necessarily known ahead of time, the variable can create an association to an optionally dynamic image name. 
Therefore, setting push step `A` with {% raw %}`candidate:${{B}}`{% endraw %} means that step `A` will push the image built by step `B`. Note that this capability works only for `candidate` and `image` fields in Codefresh steps. | + +A very common pattern in Codefresh pipelines, is to create a Docker image in one step, and then run a command on its container in the next step (e.g. run [unit tests]({{site.baseurl}}/docs/testing/unit-tests/)): + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - ./my-unit-tests.sh +{% endraw %} +{% endhighlight %} + +In the example above you can see the `MyAppDockerImage` variable that denotes a Docker image created dynamically within this single pipeline. In the second step we use it as a Docker context in order to run unit tests. See also the [unit testing example app]({{site.baseurl}}/docs/yaml-examples/examples/run-unit-tests/). + +## Step variables + +Every [step]({{site.baseurl}}/docs/pipelines/steps/) in a Codefresh pipeline also exposes several built-in variables. You can access them via the global `steps` parent variable. + + * Each step creates a variable based on the name of the step. You can then use the members of each variable for status conditions such as: `steps.MyUnitTests.result == 'error'` for a step called `MyUnitTests`. + * To access variables that have a non-standard (i.e. only alphanumeric and _ characters) names, use the Variable() function. + +### Step Member variables + +Variables that are created by steps can have members. The members depend on the step type. For example if you have a build step named `myBuildStep` you can get the ID of the docker image that gets created with {% raw %}`echo ${{steps.myBuildStep.imageId}}`{% endraw %} + +{: .table .table-bordered .table-hover} +| Step Type | Members | +| ----------------------- | -------------------------------------- | +| All step types | {::nomarkdown}
            • name
            • type
            • description
            • workingDirectory
            • result
            {:/} +| [**Freestyle**]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) | - | +| [**Composition**]({{site.baseurl}}/docs/codefresh-yaml/steps/composition/) | - | +| [**Build**]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) | {::nomarkdown}
            • imageName
            • imageTagName
            • imageId
            {:/} | +| [**Git-clone**]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) | {::nomarkdown}
            • revision
            • repo
            {:/} | +| [**Push**]({{site.baseurl}}/docs/codefresh-yaml/steps/push/) | {::nomarkdown}
            • registry
            • imageId
            • imageRepoDigest
            {:/} | +| [**Approval**]({{site.baseurl}}/docs/codefresh-yaml/steps/approval/) | {::nomarkdown}
            • authEntity.name
            • authEntity.type
            {:/} | + + + +## GitHub release variables + +GitHub allows you to create [releases](https://help.github.com/articles/creating-releases/) for marking specific Git tags for general availability. + +You can set a [trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) for GitHub releases. When a GitHub release happens, the following variables are also available: + + + +{: .table .table-bordered .table-hover} +| Variable | Description | +| --------------- | ------------------------------------------------------ | +| {% raw %}`${{CF_RELEASE_NAME}}`{% endraw %} | GitHub release title | +| {% raw %}`${{CF_RELEASE_TAG}}`{% endraw %} | Git tag version | +| {% raw %}`${{CF_RELEASE_ID}}`{% endraw %} | Internal ID for this release | +| {% raw %}`${{CF_PRERELEASE_FLAG}}`{% endraw %} | true if the release if marked as non-production ready, false if it is ready for production | + +## GitHub Pull Request variables + +When a pull request is closed in GitHub, the following variables are also available + +{: .table .table-bordered .table-hover} +| Variable | Description | +| --------------- | ------------------------------------------------------ | +| {% raw %}`${{CF_PULL_REQUEST_MERGED}}`{% endraw %} | true if the pull request was merged to base branch | +| {% raw %}`${{CF_PULL_REQUEST_HEAD_BRANCH}}`{% endraw %} | the head branch of the PR (the branch that we want to merge to master) | +| {% raw %}`${{CF_PULL_REQUEST_MERGED_COMMIT_SHA}}`{% endraw %} | the commit SHA on the base branch after the pull request was merged (in most cases it will be master) | +| {% raw %}`${{CF_PULL_REQUEST_HEAD_COMMIT_SHA}}`{% endraw %} | the commit SHA on the head branch (the branch that we want to push) | + +## User-defined variables + +User variables can be defined at 6 levels: + +1. Manually within a step using the [export](http://linuxcommand.org/lc3_man_pages/exporth.html) command or in any **subsequent** step with the [cf_export]({{site.baseurl}}/docs/codefresh-yaml/variables/#using-cf_export-command) command +1. [Freestyle Step Definition]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/#examples) (using the `environment` field) +1. Specific build Execution (after clicking the "Build" button open the "Build Variables" section, or use the [CLI]({{site.baseurl}}/docs/integrations/codefresh-api/#example---triggering-pipelines)) +1. Pipeline Definition (under "Environment variables" section in the [pipeline view]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#creating-new-pipelines)) +1. [Shared Configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) (defined under your account settings, and used using the "Import from shared configuration" button under the "Environment Variables" section in the pipeline view) +1. Variables defined on the Project level (Under the variables tab on any project view) + +The options are listed in order of priority (from the most important to the least important), so in case of multiple variables defined at different locations with the same name, the order of overriding will be as listed here. + +For example if a pipeline variable is defined both in project level and as an execution parameter of a specific build, then the final result will be the value defined as a build parameter and the project level variable will not take effect. + +## Exporting environment variables from a freestyle step + +Steps defined inside steps are scoped to the step they were created in (even if you used the `export` command). 
In order to allow using variables across steps, we provide a shared file that facilitates variables importing and exporting. There are two ways to add variables to this file: + +### Using cf_export command +Within every freestyle step, the `cf_export` command allows you to export variables across steps (by writing to the shared variables file). + +> The variables exported with cf_export overrides those at the pipeline-level. + +You can either: +- Explicitly state a VAR=VAL pair +- State the name of an existing *exported* environment variable (like EXISTING_VAR). + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + freestyle-step-1: + description: Freestyle step.. + title: Free styling + image: alpine:latest + commands: + # Normal export will only work in a single step + - export EXISTING_VAR=www.example.com + + # CF export will now work in all other subsequent steps + - cf_export VAR1=alpine:latest VAR2=VALUE2 EXISTING_VAR + + freestyle-step-2: + description: Freestyle step.. + title: Free styling 2 + image: ${{VAR1}} + commands: + - echo $VAR2 + - echo http://$EXISTING_VAR/index.php +{% endraw %} +{% endhighlight %} + +Notice that `cf_export` has the same syntax structure as the [bash export command](https://www.gnu.org/software/bash/manual/html_node/Environment.html). This means that when you use it you **don't** need any dollar signs for the variable created/assigned. + +``` +cf_export $MY_VAR # Don't do this +cf_export MY_VAR # Correct syntax +``` + +Also notice that `cf_export` works on *subsequent* steps only. If you want to export a variable right away in the present step and all the rest of the steps you need to do the following: + +``` +export MY_VAR='example' # Will make MY_VAR available in this step only +cf_export MY_VAR='example' # Will also make MY_VAR available to all steps after this one +``` + +There is nothing really magic about `cf_export`. It is a normal script. You can see its contents on your own by entering the command `cat /codefresh/volume/cf_export` on any [Codefresh freestyle step]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) inside a pipeline. + +For more information on its limitations see the [troubleshooting page]({{site.baseurl}}/docs/troubleshooting/common-issues/cf-export-limitations/). + + + +### Directly writing to the file + +For more advanced use cases, you can write directly to the shared variable file that Codefresh reads to understand which variables need to be available to all steps. This file has a simple format where each line is a variable and its value in the form of `VARIABLE=VALUE`. The `cf_export` command mentioned in the previous section is just a shorthand for writing on this file. + +The variables file is available inside freestyle steps in the following path: **`{% raw %}${{CF_VOLUME_PATH}}{% endraw %}/env_vars_to_export`** + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + freestyle-step-1: + description: Freestyle step.. + title: Free styling + image: alpine:latest + commands: + - echo VAR1=192.168.0.1 >> ${{CF_VOLUME_PATH}}/env_vars_to_export + - echo hey=alpine:3.9 >> ${{CF_VOLUME_PATH}}/env_vars_to_export + + freestyle-step-2: + description: Freestyle step.. + title: Free styling 2 + image: ${{hey}} + commands: + - echo http://$VAR1/index.php +{% endraw %} +{% endhighlight %} + +Use this technique if you have complex expressions that have issues with the `cf_export` command. 
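+
+For example, a value that is only known at runtime, such as an application version read from a file in the cloned repository, can be appended to the same file so that later steps can use it. This is only a minimal sketch, assuming the repository contains a `version.txt` file (an illustrative name):
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  compute-version:
+    title: Computing application version
+    image: alpine:latest
+    commands:
+      # Read the version from a file in the cloned repository (illustrative file name)
+      - export APP_VERSION=$(cat version.txt)
+      # Append it to the shared variables file so that subsequent steps can read it
+      - echo APP_VERSION=$APP_VERSION >> ${{CF_VOLUME_PATH}}/env_vars_to_export
+  use-version:
+    title: Using the computed version
+    image: alpine:latest
+    commands:
+      - echo "Current version is $APP_VERSION"
+{% endraw %}
+{% endhighlight %}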
+ +## Masking variables in logs + +Codefresh has the built-in capabililty to automatically mask variables in logs if they are encrypted. The values of encrypted variables will be replaced with asterisks in build logs. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/variables/masked-variables.png" +url="/images/pipeline/codefresh-yaml/variables/masked-variables.png" +alt="Masked variables" +caption="Masked variables" +max-width="80%" +%} + +The variables can be defined in any of the usual ways Codefresh offers such as [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) or [within the pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-settings): + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/variables/encrypted-variables.png" +url="/images/pipeline/codefresh-yaml/variables/encrypted-variables.png" +alt="Encrypted variables" +caption="Encrypted variables" +max-width="60%" +%} + +>Notice that this feature is currently available only in Enterprise accounts. + + +## Escape characters +When passing special characters through environmental variables `\` can be used as an escape character. For example if you were passing a cassandra connection string you might do something like `Points\=hostname\;Port\=16376\;Username\=user\;Password\=password` + +This will safely escape `;` and `=`. + +## Related articles +[Pipeline steps]({{site.baseurl}}/docs/codefresh-yaml/steps/) +[Codefresh Conditionals]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) diff --git a/_docs/pipelines/what-is-the-codefresh-yaml.md b/_docs/pipelines/what-is-the-codefresh-yaml.md new file mode 100644 index 000000000..4be99e303 --- /dev/null +++ b/_docs/pipelines/what-is-the-codefresh-yaml.md @@ -0,0 +1,378 @@ +--- +title: "Pipeline definitions YAML" +description: "How to define Codefresh pipelines in a declarative manner" +group: pipelines +redirect_from: + - /docs/codefresh-yaml/ + - /docs/what-is-the-codefresh-yaml + - /docs/what-is-the-codefresh-yaml/ + - /docs/codefresh-yaml/working-directories/ + - /docs/working-directories/ +toc: true +--- + +Codefresh offers its own built-in format for creating pipelines. The pipeline specification is +based on the YAML syntax allowing you to describe your pipelines in a completely declarative manner. + +Using Codefresh yaml is the recommended way to [create pipelines]({{site.baseurl}}/docs/pipelines/pipelines/). + +## Simple example for codefresh.yml + +Here is a very minimal example: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + build_image: + type: build + description: Building the image... + image-name: myuser/myservice + tag: develop # {% raw %}${{CF_BRANCH}}{% endraw %} + + perform_tests: + image: node:5 + working_directory: {% raw %}${{main_clone}}{% endraw %} + description: Performing unit tests... + commands: + - npm install gulp -g + - npm install + - gulp unit_test +{% endhighlight %} + +It contains two [steps]({{site.baseurl}}/docs/pipelines/steps/), one named *build_image* that creates a docker image, and another one called *perform_tests* that runs unit test with `gulp`. + +If you want to know more about how steps work in Codefresh make sure to read [the introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) first, before moving on. 
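+
+As a small variation of the example above, the build step could tag the image after the branch that triggered the build instead of using a fixed tag. This is only a sketch (the image name is illustrative); the normalized variable is used because branch names may contain characters that are not valid in Docker tags:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  build_image:
+    type: build
+    description: Building the image...
+    image_name: myuser/myservice
+    # Branch name with characters that are illegal in Docker tags removed
+    tag: '${{CF_BRANCH_TAG_NORMALIZED}}'
+{% endraw %}
+{% endhighlight %}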
+ +## Basic pipeline syntax + +You can customize your build environment (pipeline) by using the Codefresh YAML file, ```codefresh.yml```. Codefresh uses the build specifications in the ```codefresh.yml``` file to execute your build. The ```codefresh.yml``` can be basic or it can include intricate build specifications. + +A YAML file is comprised of a series of steps that are executed in the order in which they are specified. + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' + +steps: + step-name: + [step-contents] + another-step: + [step-contents] + the-very-last-step: + [step-contents] +{% endhighlight %} + +You must define a step type for each step, unless you are using a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). Each step uses Docker images and containers as facilitators for execution. For example, the **Freestyle** step spins up a container and executes the specified shell commands from the YAML file. + +The step names should be unique within the same pipeline. This mainly affects the visualization of the pipeline when it runs. + +Each step produces a resource, which you can [reference](https://github.com/codefresh-contrib/python-flask-sample-app/blob/master/codefresh.yml#L23) in other steps, and are executed in real-time. For example, a **Freestyle** step can reference an image that was produced by a [**Build**]({{site.baseurl}}/docs/pipelines/steps/build/) step. This allows you to chain steps together and create highly-customized builds. + + +##### Variables + +Steps chaining and referencing is possible due to implementation of variables in the YAML file - read more on relevant [section]({{site.baseurl}}/docs/pipelines/variables/). + + +{: .table .table-bordered .table-hover} +| Step Type | Description | +| ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- | +| [Freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) | Executes one or more shell commands in a container similar to `docker run`. | +| [Build]({{site.baseurl}}/docs/pipelines/steps/build/) | Builds a Docker image like `docker build`. | +| [Push]({{site.baseurl}}/docs/pipelines/steps/push/) | Pushes a Docker image to an external registry similar to `docker tag` and `docker push`. | +| [Git Clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) | Overrides the default git clone behavior. | +| [Composition]({{site.baseurl}}/docs/pipelines/steps/composition/) | Starts a Docker Composition like `docker-compose`. Discarded once pipelines finishes. | +| [Launch Composition]({{site.baseurl}}/docs/pipelines/steps/launch-composition/) | Starts a long term Docker composition that stays up after the end of the pipeline. | +| [Deploy]({{site.baseurl}}/docs/pipelines/steps/deploy/) | Deploys to Kubernetes clusters. | +| [Approval]({{site.baseurl}}/docs/pipelines/steps/approval/) | Pauses a pipeline and waits for human intervention. | + + +For more information on creating your own step, see the [Steps in piplines]({{site.baseurl}}/docs/pipelines/steps/). + +You can also see the [full YAML specification]({{site.baseurl}}/docs/integrations/codefresh-api/#full-pipeline-specification) supported for pipelines. Note however that several fields are only accessible by using the [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api) or [CLI](https://codefresh-io.github.io/cli/). 
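+
+To make the step chaining described above concrete, here is a minimal sketch (step names, the image name, and the test script are illustrative) in which a freestyle step runs inside the image produced by the build step before it, and a push step then uploads that same image:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  build_my_image:
+    title: Building Docker Image
+    type: build
+    image_name: my-own-app
+  run_my_tests:
+    title: Running tests
+    # Runs inside the image produced by the build step above
+    image: ${{build_my_image}}
+    commands:
+      - ./run-tests.sh
+  push_my_image:
+    title: Pushing to registry
+    type: push
+    # The candidate field also accepts a reference to a build step
+    candidate: ${{build_my_image}}
+    tag: '${{CF_BRANCH_TAG_NORMALIZED}}'
+{% endraw %}
+{% endhighlight %}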
+ +## Yaml validation + +If you are editing Codefresh yaml within the Codefresh UI, the editor will automatically highlight errors as they happen. + +This allows you to make quick edits (and possibly run some builds) straight from the GUI. Once you are happy with your pipeline you should commit it to your repository as `codefresh.yml` (pipeline as code). + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/inline-editor.png" +url="/images/pipeline/codefresh-yaml/inline-editor.png" +alt="Graphical Inline Yaml Editor" +caption="Graphical Inline Yaml Editor" +max-width="50%" +%} + +You can also validate the pipeline yaml outside of the UI by using the [Codefresh CLI](https://codefresh-io.github.io/cli/). The CLI has a [validate parameter](https://codefresh-io.github.io/cli/validation/) that can check one or more files for syntax errors + +{% highlight shell %} +{% raw %} +$ codefresh validate codefresh.yml +Yaml not valid: + - "invalid-property" is not allowed +{% endraw %} +{% endhighlight %} + +For more information on where the YAML file can be stored see the [creating pipelines page]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/). + +## Execution flow + +By default, Codefresh will execute all steps in the yaml file and instantly fail the build, if any step +presents an error. To change this behavior add the `fail_fast:false` property in any step that you wish to be ignored +in case of errors. + +For example, if you have a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that runs integration tests, and you don't want the whole pipeline +to fail if any of the tests fail, add the `fail_fast` line to that step: + + +{% highlight yaml %} +perform_tests: + image: node:9 + description: Running integration tests + fail_fast: false + commands: + - gulp integration_test +{% endhighlight %} + +Now the pipeline will continue to run even if the step `perform_tests` fails. + +Notice also that by default Codefresh pipelines run in *sequential mode*. All steps will be executed one after +the other and in the same order as included in the `codefresh.yml` file. + +If you wish to use parallel steps in your pipelines, see the [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) page. + +## Working directories + +In the context of a step, a working directory can be of the following type: + +{: .table .table-bordered .table-hover} +| Working Directory | Description | +| --------------------- | -------------------------------------------- | +| Empty | Defaults to the [Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) (found at `/codefresh/volume`). If there is a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) with the special name `main_clone` then the default working directory for built-in steps is now the [project folder]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) that was checked out - this only applies to [built-in]({{site.baseurl}}/docs/pipelines/steps/#built-in-steps) Codefresh steps and not [custom plugins]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin). | +| Variable that contains the ID of a [Git-Clone]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) step | Runs the step within the cloned directory. | +| Variable that contains the ID of any other step | Runs the step within the same working directory that the specified was executed. 
This option is not available for [**Git-Clone**]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) steps. |
+| Absolute filesystem path | Treated as is within the container. |
+| Relative filesystem path | Treated as a path relative to the cloned directory of the service. |
+| 'IMAGE_WORK_DIR' | Use this value to run the step in the working directory that is defined in the image itself, for example:
            `working_directory: IMAGE_WORK_DIR` | + + +## Retrying a step + +Sometimes you want to retry a step that has a problem. Network hiccups, transient failures and flaky test environments are common problems that prevent pipelines from working in a predictable manner. + +Codefresh allows you to retry any of your steps with the built-in syntax: + + `yaml` +{% highlight yaml %} +{% raw %} +step-name: + [step-contents] + retry: + maxAttempts: 5 + delay: 5 + exponentialFactor: 2 +{% endraw %} +{% endhighlight %} + +The `retry:` block has the following parameters: + + * `maxAttempts` defines how many times this step will run again if there are execution errors (default is 1 and the Max. is 10). + * `delay` is the number of seconds to wait before each attempt (default is 5 seconds and the Max. is 60 seconds). + * `exponentialFactor` defines how many times the delay should be multiplied by itself after each attempt (default is 1 and Max. is 5). + +All parameters are optional. The exponentialFactor works like this: +* exponentialFactor=1, delay=5 => each time wait 5 seconds before trying again, no matter the number of attempts. +* exponentialFactor=2, delay=5 => first retry will have a delay of 25 seconds, third will have 125 and so on. + + +Here is a full example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + retry: + maxAttempts: 2 + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - ./my_unit_tests.sh + retry: + maxAttempts: 3 + delay: 5 + PushingToRegistry: + type: push + title: Pushing To Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_BRANCH}}' + retry: + maxAttempts: 3 + delay: 3 + exponentialFactor: 2 +{% endraw %} +{% endhighlight %} + +Notice that Codefresh also provides the following variables that allow you change your script/applications according to the retry attempts: + +* `CF_CURRENT_ATTEMPT` contains the number of current retry attempt. +* `CF_MAX_ATTEMPTS` contains all the number of total attempts defined. + +The retry mechanism is available for all kinds of [steps]({{site.baseurl}}/docs/pipelines/steps/). + +## Escaping strings + +If you want to use strings inside your pipeline that create conflicts with the Codefresh syntax parser (for example they are YAML themselves) you need +to escape them using multi-line strings with the `>-` and `|-` characters. + +The following pipeline is not parsed correctly because the echo command is using the yaml `:` character + +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + test: + title: "Running test" + type: "freestyle" + image: "alpine:3.9" + commands: + - echo hello: world +{% endraw %} +{% endhighlight %} + +You can fix this issue by using a multi-line YAML string: + +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + test: + title: "Running test" + type: "freestyle" + image: "alpine:3.9" + commands: + - |- + echo hello: world +{% endraw %} +{% endhighlight %} + +The `|-` character keeps the line breaks of the text (but removes the last one). Use the `>-` character if you want to convert line breaks to spaces. +For more information see the [YAML specification](https://yaml.org/spec/1.2/spec.html). + +## Using YAML anchors to avoid repetition + +Codefresh also supports yaml anchors, references and extends. These allow you to keep +your pipeline [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself). 
+ +For example, let's say that you have two freestyle steps: + +1. The first one fills a MySQL server with data. +1. The second one runs integration tests that use the MySQL server. + +Here is the respective pipeline: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + preLoadDatabase: + title: Loading Data + image: alpine + commands: + - printenv + - echo "Loading DB" + environment: &my_common_envs + - MYSQL_HOST=mysql + - MYSQL_USER=user + - MYSQL_PASS=password + - MYSQL_PORT=3351 + runTests: + title: Integration tests + image: alpine + commands: + - printenv + - echo "Running tests" + environment: *my_common_envs # Same MYSQL_HOST, MYSQL_USER etc. +{% endhighlight %} + +Instead of repeating the same environment variables in both steps, we can create them once and then just reference them in the second step with the `*` character. + +You also define anchors at the top of the pipeline in the special `indicators` block: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' + +indicators: + - environment: &my_common_envs + - MYSQL_HOST=mysql + - MYSQL_USER=user + - MYSQL_PASS=password + - MYSQL_PORT=3351 + +steps: + preLoadDatabase: + title: Loading Data + image: alpine + commands: + - printenv + - echo "Loading DB" + environment: *my_common_envs # Same MYSQL_HOST, MYSQL_USER etc. + runTests: + title: Integration tests + image: alpine + commands: + - printenv + - echo "Running tests" + environment: *my_common_envs # Same MYSQL_HOST, MYSQL_USER etc. + +{% endhighlight %} + + +Finally. you also extend steps like below: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + deploy_to_k8_staging: &my_basic_deployment + title: deploying to cluster + type: deploy + kind: kubernetes + cluster: myStagingCluster + namespace: sales + service: my-python-app + deploy_to_k8_prod: + <<: *my_basic_deployment + cluster: myProdCluster # only cluster differs, everything else is the same + +{% endhighlight %} + +Here we deploy to two kubernetes clusters. The first step defines the staging deployment. +For the second step, we extend the first one and only change the name of the cluster +to point to production. Everything else (i.e. namespace and service) are exactly the same. + + +## Related articles +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Variables in CI pipelines]({{site.baseurl}}/docs/pipelines/variables/) +[Advanced workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[YAML examples]({{site.baseurl}}/docs/example-catalog/examples/) + + + + + + + diff --git a/_docs/quick-start/ci-quick-start.md b/_docs/quick-start/ci-quick-start.md new file mode 100644 index 000000000..57f6ef389 --- /dev/null +++ b/_docs/quick-start/ci-quick-start.md @@ -0,0 +1,45 @@ +--- +title: "Create a project for pipeline" +description: "" +group: quick-start +toc: true +--- + +Codefresh pipelines are grouped under projects. Think of a project as a folder or directory that groups related pipelines. For example, all pipleines that package/deploy the different microservices for an application. +You can define any name for the project, the most common example being the name of the application that the pipelines + +### Where are you + + + +### Before you beigin + +* [Connect a Docker registry to Codefresh]({{site.baseurl}}/docs/quick-start/create-docker-registry-integration) + +### How to +1. In the Codefresh UI, in the sidebar, from Pipelines, select **Projects**. +1. 
On the top-right, click **New Project**. +1. Enter the **Project Name**. For example, `my-first-project`. +1. Leave the **Project Tags** empty for the quick start. +1. Select any **Icon** you want. The icon is prefixed to the project name in the Projects page. +1. Click **Create**. + +{% include image.html + lightbox="true" + file="/images/quick-start/quick-start-ci/create-ci-project.png" + url="/images/quick-start/quick-start-ci/create-ci-project.png" + alt="Create project for pipeline" + caption="Create project for pipeline" + max-width="70%" +%} + +You now have a new project and can create your first pipeline. + + +### Continue with + + + + + + diff --git a/_docs/quick-start/ci-quickstart/create-ci-pipeline.md b/_docs/quick-start/ci-quickstart/create-ci-pipeline.md new file mode 100644 index 000000000..e0bc80b55 --- /dev/null +++ b/_docs/quick-start/ci-quickstart/create-ci-pipeline.md @@ -0,0 +1,577 @@ +--- +title: "CI pipeline quick start" +description: "Quick start to set up a continuous integration (CI) pipeline" +group: quick-start +toc: true +--- + +This quick start guides you through setting up a CI (continuous integration) pipeline in Codefresh to create the Docker image of a sample application, run the pipeline, view results, and optionally upload the Docker image to a public registry. + +This quick start describes the following tasks: + +1. [Create and run CI pipeline](#create-and-run-ci-pipeline) +1. [Run unit tests on the Docker image](#run-unit-tests-on-the-docker-image) +1. [(Optional) Upload Docker images to Docker Hub](#optional-upload-docker-images-to-docker-hub) + + +## CI process overview + +The diagram illustrates the CI (Continuous Integration) process. Read more on [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration){:target="\_blank"}. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/pipeline-overview.jpg" +url="/images/quick-start/quick-start-ci/pipeline-overview.jpg" +alt="CI process overview" +caption="CI process overview" +max-width="100%" +%} + + +1. Connects to GitHub and checks out the source code of the sample application +1. Uses the Dockerfile of the application to create a Docker image +1. Runs unit tests in the same Docker image to verify the validity of the code +1. Stores the Docker image in your private Registry +1. (Optional) Pushes the Docker image to Docker Hub + +### Note on the sample application +For the pipeline, we'll use a sample application, the [Python/Flask](https://www.palletsprojects.com/p/flask/){:target="\_blank"} project, that has: +* Its own [Dockerfile](https://github.com/codefresh-contrib/python-flask-sample-app/blob/master/Dockerfile){:target="\_blank"} in the root of the repository. +* Unit tests. + +You can either use the sample application to follow along, or create your own Docker based example (don't forget to write unit tests). + + +> With Codefresh you can create a Docker image from without any local Docker installation, (Docker building as a service). + + + + + +## CI pipeline quick start prerequisites + +* Codefresh account +* Free [GitHub account](https://github.com/join){:target="\_blank"} +* Docker registry service account, such as [GitHub](https://github.com/features/packages){:target="\_blank"} +* Source code of the sample application +* (Optional) Docker Hub account if you also want to make your image public + +### Download source code of the sample application +We use an example application located in GitHub. 
+ +>Codefresh supports GitLab, Bitbucket and Azure GIT repositories apart from GitHub. The +same principles presented in this tutorial apply for all Git providers. + +1. Go to the GitHub repo with the [Python Flask](https://github.com/codefresh-contrib/python-flask-sample-app) sample application. +1. At the top-right, click **Fork** to bring the source code to your own account. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/fork-example-project.png" +url="/images/quick-start/quick-start-ci/fork-example-project.png" +alt="Forking the example application" +caption="Forking the example application" +max-width="80%" +%} + +After a few minutes, you should see the repo in your Git account. + +## Create and run CI pipeline + +We'll start by focusing on the first part of the CI pipeline, creating a Docker image. + +Docker images play a central role in Codefresh pipelines. They are the basic building blocks that serve as the link +between what your source code generates and what gets deployed. + +The example application already comes with its own Dockerfile. If your own application is not "dockerized" yet, first create a Dockerfile for it, and then move it into the Codefresh infrastructure. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/docker-build-steps.jpg" +url="/images/quick-start/quick-start-ci/docker-build-steps.jpg" +alt="Preparing a Docker image" +caption="Preparing a Docker image" +max-width="60%" +%} + + + + + +Creating a Docker image through a Codefresh pipeline includes these tasks: +1. [Connect a Docker registry to Codefresh](#connect-a-docker-registry-to-codefresh) +1. [Create a project for CI pipeline](#create-a-project-for-ci-pipeline) +1. [Create and run CI pipeline](#create-and-run-ci-pipeline) +1. [View and monitor CI pipeline build](#view-and-monitor-ci-pipeline-build) +1. [View the Docker image stored in Codefresh](#view-the-docker-image-stored-in-codefresh) + +### Connect a Docker registry to Codefresh + +Connect your Docker regsitry to Codefresh to store the Docker image of the sample application. +Codefresh supports all the popular Docker registries. +If you don't already have a registry, we recommend starting with the GitHub Registry for this quick start. + +#### Before you begin +Make sure you have completed all the [prerequsites](#quick-start-prerequisites) for the CI pipeline quick start + +#### How to + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from Configuration, select **Pipeline Integrations**. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Other Registries**. +1. Define the following: + * **Registry name**: A unique name for this configuration. + * **Username**: Your GitHub username. + * **Password**: Your GitHub personal token. + * **Domain**: `ghcr.io`. + * Expand **Advanced Options** and define the [**Repository Prefix**]({{site.baseurl}}/docs/integrations/docker-registries/#using-an-optional-repository-prefix) as your GitHub username. + +{% include image.html + lightbox="true" + file="/images/integrations/docker-registries/github/github-registry-codefresh.png" + url="/images/integrations/docker-registries/github/github-registry-codefresh.png" + alt="GitHub Container Registry settings" + caption="GitHub Container Registry settings" + max-width="70%" +%} + +{:start="5"} +1. To verify the connection details, click **Test Connection**. +1. To apply the changes, click **Save**. +1. 
Continue with [Create a project for CI pipeline](#create-a-project-for-ci-pipeline) + +### Create a project for CI pipeline + +Codefresh pipelines are grouped within projects. Think of a project as a folder or directory that groups related pipelines. For example, all pipleines that package/deploy the different microservices for an application. +You can define any name for the project, the most common example being the name of the application that the pipelines build and deploy. + +#### Before you begin + +* [Connect a Docker registry to Codefresh]((#connect-a-docker-registry-to-codefresh)) + +#### How to +1. In the Codefresh UI, expand Pipelines in the sidebar, and select **Projects**. +1. On the top-right, click **New Project**. +1. Enter the **Project Name**. For example, `my-first-project`. +1. Leave the **Project Tags** empty for the quick start. +1. Select any **Icon** you want. The icon is prefixed to the project name in the Projects page. +1. Click **Create**. + +{% include image.html + lightbox="true" + file="/images/quick-start/quick-start-ci/create-ci-project.png" + url="/images/quick-start/quick-start-ci/create-ci-project.png" + alt="Create project for CI pipeline" + caption="Create project for CI pipeline" + max-width="70%" +%} + + You now have a new project and can create your CI first pipeline within the project. + +{:start="6"} +1. Continue with [Create and run the CI pipeline](#create-and-run-ci-pipeline). + +### Create and run CI pipeline +Create a CI pipeline to clone the Git repo and build the Docker image. +The pipeline includes two steps: + * A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step to check out the code. + The clone step also uses built-in variables that ensures that the pipeline checks out the exact code described in the commit of the trigger. Don't worry if the exact details are not clear to you yet. + * A [build]({{site.baseurl}}/docs/pipelines/steps/build/) step to build the docker image **AND** push it to the connected Docker registry. + The build step uses a Dockerfile that is located at the root folder of the project and creates a Docker image with tag `v1.0.0`. + +#### Before you begin +* [Create a project for CI pipeline](#create-a-project-for-ci-pipeline) + +#### How to +1. From the Project page, select the project you created. +1. Click **Create Pipeline**. +1. Define the following: + * **Project**: The project is already selected. + * **Pipeline Name**: Enter a name for the pipeline. + * **Add Git repository**: Toggle to on. This setting launches the pipeline when there is a commit to the Git repository. + * **Add Git clone step to pipeline**: Select the repository with the sample application you forked from the list. +1. Click **Create**. + In the Workflow tab of the pipeline creation workspace, you'll see that the Inline YAML editor already has a sample YAML. + +{% include image.html + lightbox="true" + file="/images/quick-start/quick-start-ci/create-pipeline.png" + url="//images/quick-start/quick-start-ci/create-pipeline.png" + alt="Define pipeline settings" + caption="Define pipeline settings" + max-width="70%" +%} + +{:start="5"} +1. Copy and paste the `codefresh.yaml` below into the Inline editor to replace the existing content. +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - package +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: checkout + MyAppDockerImage: + title: Building Docker Image + type: build + stage: package + image_name: my-app-image + working_directory: ./ + tag: v1.0.0 + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +{:start="6"} +1. To apply your changes, click **Save**. +1. To start the pipeline, click **Run**. +1. Retain the default settings, and click **Run** once again. +1. Continue with [View and monitor CI pipeline build](#view-and-monitor-ci-pipeline-build). + +### View and monitor CI pipeline build +When you run the pipeline, Codefresh takes you to the Builds page where you can monitor the build progress of the sample application. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/building.png" +url="/images/quick-start/quick-start-ci/building.png" +alt="Monitoring the build run" +caption="Monitoring the build" +max-width="50%" +%} + +#### Before you begin +* [Create and run CI pipeline](#create-and-run-ci-pipeline) + +#### How to +1. Click on the step **Building Docker Image**, and view the logs in the **Output** tab. + You can download the logs in HTML or text formats if you want to review them offline. + +The build should complete successfully. + +All previous runs are displayed in the [Builds page](https://g.codefresh.io/builds). + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/finished-build.png" +url="/images/quick-start/quick-start-ci/finished-build.png" +alt="Build details" +caption="Build details" +max-width="80%" +%} + +1. Continue with [View Docker image stored in Codefresh](#view-docker-image-stored-in-codefresh). + +### View Docker image stored in Codefresh + +The build logs show that a Docker image is created after each successful build. Where does this image go? + +Codefresh has the unique feature where the build step that creates the Docker image, also automatically pushes the image to your default Docker registry! All the images that we have created so far, are stored in the registry you connected at the beginning of the quick start. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/docker-store-stage.jpg" +url="/images/quick-start/quick-start-ci/docker-store-stage.jpg" +alt="Automatic storage of Docker images" +caption="Automatic storage of Docker images" +max-width="80%" +%} + +#### Before you begin +[View and monitor CI pipeline build](#view-and-monitor-ci-pipeline-build) + +#### How to +1. In the Codefresh UI, expand **Artifacts** in the sidebar, and click **Images**. + A list of Docker images are displayed, sorted by the most recently added images. This dashboard is populated in real-time with updates from your Docker registry. + + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/docker-images.png" +url="/images/quick-start/quick-start-ci/docker-images.png" +alt="Recent Docker images" +caption="Recent Docker images (click image to enlarge)" +max-width="100%" +%} + + + +## Run unit tests on the Docker image +Run unit tests to validate the Docker image and confirm that it satisfies the requested functionality. Unit tests must be an integral part of the build process. + +We will add a new [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) to the pipeline's YAML, that runs unit tests. Freestyle steps run custom commands within Docker containers. 
+ +For the quick start, we run the python command [within the docker image]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables), created from the previous step `MyAppDockerImage` defined by the `image` property. + + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/unit-test-stage.jpg" +url="/images/quick-start/quick-start-ci/unit-test-stage.jpg" +alt="Unit tests workflow" +caption="Unit tests workflow" +max-width="80%" +%} + +### Before you begin + +* Verify you have a Docker image in Codefresh + +### How to + +1. In the Builds page, click the **YAML** tab and then click **Edit Pipeline**. + The Inline YAML editor in the Workflow tab is displayed. +1. Paste the following YAML that includes the new step to run unit tests into the Inline editor: + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - package + - test +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: checkout + MyAppDockerImage: + title: Building Docker Image + type: build + stage: package + image_name: my-app-image #Change to your image name + working_directory: ./ + tag: v1.0.1 + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + image: '${{MyAppDockerImage}}' + stage: test + commands: + - pip install pytest + - pytest +{% endraw %} +{% endhighlight %} +{:start="3"} +1. Click **Run** and then click **Run** again to relaunch the build. +1. In the Builds page, you can see a new stage with the step labeled **Running unit tests**. +1. Click the step name to see the test output of the application. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/unit-test-result.png" +url="/images/quick-start/quick-start-ci/unit-test-result.png" +alt="Unit test results" +caption="Unit test results" +max-width="60%" +%} +{:start="6"} +1. Continue with [(Optional) Upload Docker images to Docker Hub](#optional-upload-docker-images-to-docker-hub). + +> Tip: + Codefresh can also run [integration tests]({{site.baseurl}}/docs/pipelines/steps/composition/), and report [test results]({{site.baseurl}}/docs/testing/test-reports/). + Therefore, regardless of the type of tests you employ, Codefresh can accommodate your testing process in a fully automated manner as part of the main build. + + +## (Optional) Upload Docker images to Docker Hub + +As we saw in the previous task, Codefresh automatically pushes the Docker image to the default Docker registry. +You can also upload the same Docker image to a different external registry or make it public in Docker Hub. + +The [push pipeline step]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) does exactly that. It pushes an existing Docker image to the registry you define. + + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/docker-push-stage.jpg" +url="/images/quick-start/quick-start-ci/docker-push-stage.jpg" +alt="Pushing a Docker image" +caption="Pushing a Docker image" +max-width="80%" +%} + +As the last task in the CI pipeline quick start, we will push our sample application to [Docker Hub](https://cloud.docker.com/){:target="\_blank"}, the free public hosting registry of Docker Inc. +To do so, you will first need to create a free account in Docker Hub and then connect Docker Hub to Codefresh. + + +### Before you begin +* Verify you have a Docker image in Codefresh + + +### How to +1. Create a free account in Docker Hub. 
+1. Note down your username and password. + In your own projects, you can use any other [external registry]({{site.baseurl}}/docs/integrations/docker-registries/). + + >Docker.io allows you to only push images tagged with your username. + If you can do so, create a Docker Hub account with the same username that you have in Codefresh. + If not, you need to change the Docker image created to match your username in your Docker Hub account. + +{:start="3"} +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from Configuration, select **Pipeline Integrations**. +1. Select **Docker Registries** and then click **Configure**. +1. From the **Add Registry Provider** dropdown, select **Docker Hub**. +1. Define the following: + * **Registry name**: Enter a unique name for this configuration. Note down the regsitry name. You will need it to define it in the pipeline. + * **Username**: Your Docker Hub username. + * **Password**: Your Docker Hub password. + * To verify the connection details, click **Test Connection**. You should see a success message. +1. To apply the changes, click **Save**. + You have now connected your Docker Hub account to your Codefresh account. + + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/add-docker-hub.png" +url="/images/quick-start/quick-start-ci/add-docker-hub.png" +alt="Docker Hub registry integration" +caption="Docker Hub registry integration" +max-width="60%" +%} + +{:start="8"} +1. Go back to the [Builds](https://g.codefresh.io/builds2){:target="\_blank"} page, and click the pipeline to return to the Inline editor. +1. Paste the following YAML to replace the existing content. + We added a [push step]({{site.baseurl}}/docs/codefresh-yaml/steps/push/) at the end of the pipeline. The image is tagged with the name of the branch. +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - package + - test + - upload +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: checkout + MyAppDockerImage: + title: Building Docker Image + type: build + stage: package + image_name: my-app-image + working_directory: ./ + tag: v1.0.1 + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + image: '${{MyAppDockerImage}}' + stage: test + commands: + - pip install pytest + - pytest + MyPushStep: + title: Pushing to Docker Registry + type: push + stage: upload + tag: '${{CF_BRANCH}}' + candidate: '${{MyAppDockerImage}}' + image_name: kkapelon/pythonflasksampleapp #Change kkapelon to your dockerhub username + registry: dockerhub # Name of your integration as was defined in the Registry screen +{% endraw %} +{% endhighlight %} +{:start="10"} +1. Replace the `registry` name in the push step with the name you specified in the Docker Hub integration settings. +1. To apply your changes, click **Save**, and then click **Run** to start the pipeline again. +1. Click the **Pushing to Docker registry** step to view the log and monitor the progress of the push step. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/docker-pushing.png" +url="/images/quick-start/quick-start-ci/docker-pushing.png" +alt="Pushing to Docker Hub" +caption="Pushing to Docker Hub (click image to enlarge)" +max-width="70%" +%} + +> Now you have two Docker registries connected to Codefresh. 
After the build completes execution, the Docker image of the sample application is stored **both** in the default Docker registry and in Docker Hub. + +To verify the latter, visit your profile in Docker Hub and look at the image details: + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-ci/docker-hub.png" +url="/images/quick-start/quick-start-ci/docker-hub.png" +alt="Image details in Docker Hub" +caption="Image details in Docker Hub" +max-width="60%" +%} + + +You have now completed the final task in the CI pipeline quick start. + +Continue with: +[Kubernetes deployment quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-to-kubernetes/) +OR +[Helm deployment to Kubernetes quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm) + + +## Read more on pipelines & Docker registries +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh pipeline definitions YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Working with Docker registries]({{site.baseurl}}/docs/ci-cd-guides/working-with-docker-registries/) + diff --git a/_docs/quick-start/ci-quickstart/deploy-to-kubernetes.md b/_docs/quick-start/ci-quickstart/deploy-to-kubernetes.md new file mode 100644 index 000000000..b43abc31a --- /dev/null +++ b/_docs/quick-start/ci-quickstart/deploy-to-kubernetes.md @@ -0,0 +1,258 @@ +--- +title: "Kubernetes deployment quick start" +description: "How to deploy to a Kubernetes cluster from the Codefresh UI" +group: quick-start +sub_group: ci-quickstart +toc: true +--- + +This quick start will guide you through deploying the Docker image you created to a Kubernetes cluster, both manually through the Codefresh UI, and automatically through a pipeline. Deploying the image through a pipeline, redeploys the image automatically when there are changes in the source code. + +For the quick start, we will use the Codefresh UI to create both the Kubernetes service inside the cluster and the CI/CD pipeline that keeps it up to date. + + +At the end of Kubernetes deployment quick start, we will have a pipeline that: + +1. Checks out code from GitHub and creates a Docker image. +1. Stores it in the default Docker registry connected to your Codefresh account. +1. Notifies the Kubernetes cluster that a new version of the application is present. Kubernetes will pull the new image and deploy it. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/overview.png" +url="/images/quick-start/quick-start-k8s/overview.png" +alt="Deployment to Kubernetes cluster" +caption="Deployment to Kubernetes cluster" +max-width="80%" +%} + + + +## Prerequisites for Kubernetes quick start + +* [Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) in Codefresh +* The Docker registry you connected to your Codefresh account in the CI pipeline quick start +* Either our sample application or your own application that has a Dockerfile. + +>For the quick start, you **don't** need a Kubernetes deployment file. Codefresh creates one for you through the UI. + + + +## Manually deploy Docker image to Kubernetes + +Deploy the Docker image to your Kubernetes cluster without writing any configuration files at all. + +1. Get the name of the Docker image you created: + 1. In the Codefresh UI, expand Artifacts in the sidebar, and select **Images**. + 1. Click the Docker image you created, and then click **more details** on the right. 
+ 1. In the **Summary** tab, copy the image name from Image Info. + + + + + +>Do not use `latest` for your deployments. This doesn't help you to understand which version is deployed. + Use either branch names or even better git hashes so that you know exactly what is deployed on your Kubernetes cluster. + + + Notice also that the YAML manifest that Codefresh creates has an image pull policy of `always`, so the cluster will always redeploy the latest image even if it has the same name as the previous one. + +{:start="2"} +1. In the Codefresh UI, expand Ops from the sidebar, and select **Kubernetes Services**. + Codefresh displays the deployments (pods and namespaces) in your Kubernetes cluster. +1. On the top-right, click **New**, and then select **Add Service**. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/add-service-button.png" +url="/images/quick-start/quick-start-k8s/add-service-button.png" +alt="Codefresh Kubernetes Dashboard" +caption="Codefresh Kubernetes Dashboard" +max-width="70%" +%} + +{:start="4"} +1. Create a Kubernetes deployment (and associated service): + * **Cluster**: The cluster to which to deploy your image. If you have more than one cluster, select the cluster. + * **Namespace**: The namespace in the cluster to which to deploy the application. For the quick start, retain **default**. + * **Service Name**: An arbitrary name for your service. + * **Replicas**: The number of replicas to create for resiliency. For the quick start, we'll define **1**. + * **Expose Port**: Select to make your application available outside the cluster and users can access it. + * **Image**: The fully qualified name of your Docker image that you copied from the Images dashboard. By default, Codefresh appends the branch name of a git commit to the resulting Docker image. This is why +we used the branch name as tag. + * **Image Pull Secret**: Select your default Docker registry and create a pull secret for it. + * **Internal Ports**: The port exposed from your application. The example Python app we deploy, exposes `5000`. +1. Click **Deploy**. Codefresh creates a Kubernetes YAML file behind the scenes and apply it to your Kubernetes cluster. + The cluster: + * Pulls the image from the Codefresh registry + * Creates all the needed resources (service, deployments, pods) to make the application available +1. Monitor the status of the deployment in the UI. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/after-deployment.png" +url="/images/quick-start/quick-start-k8s/after-deployment.png" +alt="Codefresh K8s deployment" +caption="Codefresh K8s deployment" +max-width="70%" +%} + +Once the deployment is complete, you can see the public URL of the application. You can visit it in the browser +and see the live application. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/before-change.png" +url="/images/quick-start/quick-start-k8s/before-change.png" +alt="Example Python Application" +caption="Example Python Application" +max-width="50%" +%} + +You have completed deploying a Docker image manually to a Kubernetes cluster without writing any YAML files at all! + +In the following task, we will automate the deployment process, so that every commit in Git, redploys the application. + +## Automatically deploy Docker image to Kubernetes + +Set up a pipeline in Codefresh so that any commits in GitHub automatically redeploys the application, giving us a true CI/CD pipeline. 
+To do this, we will add a new [deploy step]({{site.baseurl}}/docs/pipelines/steps/deploy/) at the end of the pipeline. Deploy steps allow you to deploy Kubernetes applications in a declarative manner. + + +>Remember that the application itself is already running successfully in the Kubernetes cluster after the manual deployment. + +1. In the Codefresh UI, expand Pipelines in the sidebar, and select **Pipelines**. +1. From the pipeline list, select the pipeline you created. +1. Switch to the **Workflows** tab. +1. Replace the existing content in the Inline YAML editor with the example below. +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - package + - test + - upload + - deploy +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: checkout + MyAppDockerImage: + title: Building Docker Image + type: build + stage: package + image_name: my-app-image + working_directory: ./ + tag: '${{CF_BRANCH}}' + dockerfile: Dockerfile + disable_push: true + MyUnitTests: + title: Running Unit tests + image: '${{MyAppDockerImage}}' + stage: test + commands: + - python setup.py test + MyPushStep: + title: Pushing to DockerHub Registry + type: push + stage: upload + tag: '${{CF_BRANCH}}' + candidate: '${{MyAppDockerImage}}' + image_name: kkapelon/pythonflasksampleapp #Change kkapelon to your dockerhub username + registry: dockerhub # Name of your integration as was defined in the Registry screen + DeployToMyCluster: + title: deploying to cluster + type: deploy + stage: deploy + kind: kubernetes + ## cluster name as the shown in account's integration page + cluster: my-demo-k8s-cluster + # desired namespace + namespace: default + service: python-demo + candidate: + # The image that will replace the original deployment image + # The image that been build using Build step + image: kkapelon/pythonflasksampleapp:${{CF_BRANCH}} + # The registry that the user's Kubernetes cluster can pull the image from + # Codefresh will generate (if not found) secret and add it to the deployment so the Kubernetes master can pull it + registry: dockerhub +{% endraw %} +{% endhighlight %} +{:start="5"} +1. Click **Save**. + The deploy step updates the *existing* Kubernetes deployment. + * If needed, the step creates a [pull secret]({{site.baseurl}}/docs/ci-cd-guides/access-docker-registry-from-kubernetes/) for the image. + * It does not create any Kubernetes services, as we already created a Kubernetes service when we manually deployed the image. +1. Modify the application in the production branch, and commit/push the change to Git. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/git-change.png" +url="/images/quick-start/quick-start-k8s/git-change.png" +alt="Commit change to Git" +caption="Commit change to Git" +max-width="70%" +%} + + Codefresh automatically identifies the change and [triggers]({{site.baseurl}}/docs/pipeline/triggers/) a new build that deploys the new version: + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/deployment-build.png" +url="/images/quick-start/quick-start-k8s/deployment-build.png" +alt="Codefresh K8s deployment" +caption="Codefresh K8s deployment" +max-width="90%" +%} + + +Once the build is complete, if you visit the URL, you will see your change applied. 
+ + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-k8s/after-change.png" +url="/images/quick-start/quick-start-k8s/after-change.png" +alt="Example Python Application after change" +caption="Example Python Application after change" +max-width="50%" +%} + +You now have a complete CI/CD pipeline in Codefresh that executes fully automated builds to Kubernetes. + +Continue with: +[Helm deployment to Kubernetes quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-with-helm) +OR +[On-demand environment quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/on-demand-environments) + + +## Read more on Kubernetes deployments & pipelines +[Deployment options for Kubernetes]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/) +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh pipeline definitions YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) + + + + + + + + + + + + + + diff --git a/_docs/quick-start/ci-quickstart/deploy-with-helm.md b/_docs/quick-start/ci-quickstart/deploy-with-helm.md new file mode 100644 index 000000000..35173d4d3 --- /dev/null +++ b/_docs/quick-start/ci-quickstart/deploy-with-helm.md @@ -0,0 +1,371 @@ +--- +title: "Helm deployment to Kubernetes quick start" +description: "Use the Helm package manager to deploy to a Kubernetes cluster from the Codefresh UI" +group: quick-start +sub_group: ci-quickstart +toc: true +--- + +The [Kubernetes deployment quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-to-kubernetes/) showed you how to quickly deploy an application directly to Kubernetes. + +The Helm quick start guides you through using [Helm](https://helm.sh/){:target="\_blank"} as a package manager in Codefresh to deploy to Kubernetes, view the Helm release, and store a Helm chart. + +[Helm](https://helm.sh/){:target="\_blank"} is similar to other package managers (yum, apt, npm, maven, pip, gems), but works at the application level allowing you to deploy multiple manifests together. + + + + + + + + + +This quick start will show you how to: + +1. Deploy a Helm release to your Kubernetes cluster +1. View the Helm release +1. Store a Helm package inside the integrated Codefresh repository + + + +For reasons of simplicity, we will use the [default Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/#the-default-registry) that is set up globally in your Codefresh account. For your own application you can also use any other of your registries even if it is not the default. 
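+
+If you have not worked with Helm before, it may help to know that a chart is simply a directory of templated Kubernetes manifests plus a file of default values. The sample application keeps its chart under `charts/python` in the repository; the exact files may differ slightly, but a chart of this kind typically looks roughly like this:
+
+{% highlight text %}
+charts/python/
+├── Chart.yaml          # chart metadata: name, version
+├── values.yaml         # default values: image repository, tag, pull policy, ports
+└── templates/          # templated Kubernetes manifests
+    ├── deployment.yaml
+    └── service.yaml
+{% endhighlight %}
+
+Helm renders the templates with the values you supply and applies the resulting manifests to the cluster as a single release, which is what lets you deploy multiple manifests together.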
+ + +## Prerequisites +* A [Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/add-kubernetes-cluster/) in Codefresh +* The Docker registry you connected to your Codefresh account in the CI pipeline quick start +* An application that has a Dockerfile and a [Helm chart]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#helm-setup) +* Cluster with pull access to your default Docker registry + If you are not familiar, read [Manually deploy Docker image to Kubernetes]({{site.baseurl}}/docs/quick-start/ci-quickstart/deploy-to-kubernetes/#manually-deploy-docker-image-to-kubernetes), or read about [secrets]({{site.baseurl}}/docs/deployments/kubernetes/deploy-to-kubernetes/create-image-pull-secret/) + +If you want to follow along, feel free to fork this [repository](https://github.com/codefresh-contrib/python-flask-sample-app){:target="\_blank"} in your Git account and look at the [with-helm](https://github.com/codefresh-contrib/python-flask-sample-app/tree/with-helm){:target="\_blank"} branch. + +## Deploy a Helm Release to your Kubernetes cluster + + + + + + +Codefresh provides a special [Helm step](https://codefresh.io/steps/step/helm){:target="\_blank"} that you can use to perform a deployment. +We will create a new pipeline with the Helm deploy step and run it to deploy the release to your Kubernetes cluster. + + +The `DeployMyChart` Helm step has three environment variables: +* `chart_name` points to the [chart in the Git repository](https://github.com/codefresh-contrib/python-flask-sample-app/tree/with-helm/charts/python){:target="\_blank"}. +* `release_name` defines the name of the deployment to be created in the cluster. +* `kube_context` defines which cluster to be used. The name is the same as that of the cluster you added [Codefresh Integrations](https://g.codefresh.io/account-admin/account-conf/integration/kubernetes). + +The step deploys the Helm chart using the default values as found in `values.yaml` within the chart folder. +It makes sense to override the defaults using some parameters in the build. For example, instead of tagging the Docker image with the branch name (which is always the same for each build), we could tag it with the hash of the source revision. + +The `custom_values` override the default chart values. The underscores are replaced with dots. +Here we override the name of tag (to match the Docker image built in the previous step) and the pull policy. + + + +1. From the Project page, select the project you created. +1. Click **Create Pipeline**. +1. Define the following: + * **Project**: The project is already selected. + * **Pipeline Name**: Enter a name for the pipeline that will deploy with Helm. + * **Add Git repository**: Toggle to on. This setting launches the pipeline when there is a commit to the Git repository. + * **Add Git clone step to pipeline**: Select the repository with the sample application you forked from the list. +1. Click **Create**. + In the Workflow tab, you'll see that the Inline YAML editor already has a sample YAML. +1. Replace the existing content in the Inline YAML editor with the example below. +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - package + - deploy +steps: + clone: + title: Cloning main repository... 
+ type: git-clone + arguments: + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: checkout + BuildingDockerImage: + title: Building Docker Image + type: build + arguments: + image_name: my-flask-app + working_directory: ./python-flask-sample-app + tag: '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile + stage: package + DeployMyChart: + type: helm + stage: deploy + working_directory: ./python-flask-sample-app + arguments: + action: install + chart_name: charts/python + release_name: my-python-chart + helm_version: 3.0.2 + kube_context: kostis-demo@FirstKubernetes + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'repository=r.cfcr.io/kostis-codefresh/my-flask-app' + - 'image_tag=${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} +{:start="5"} +1. Click **Save** and then click **Run** twice to run the pipeline. +1. Continue with [View Helm release](#view-helm-release). + +You can see the value replacements in the Helm logs inside the pipeline: + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-helm/helm-logs.png" +url="/images/quick-start/quick-start-helm/helm-logs.png" +alt="Helm Value replacement" +caption="Helm Value replacement" +max-width="100%" +%} + + +This is the easiest way to deploy to Kubernetes without having to manually change values in manifests. Helm and Codefresh handle the replacements using the built-in steps. + +## View Helm release + +When a Helm package is deployed to your Kubernetes cluster, Codefresh displays it in the [Helm releases]({{site.baseurl}}/docs/new-helm/helm-releases-management/) dashboard. + +### Before you begin +* [Deploy a Helm Release to your Kubernetes cluster](#deploy-a-helm-release-to-your-kubernetes-cluster) + +### How to + +1. In the Codefresh UI, expand Ops from the sidebar, and select **Helm Releases**. + The new release is displayed. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-helm/helm-release-details.png" +url="/images/quick-start/quick-start-helm/helm-release-details.png" +alt="Helm Releases dashboard with new release" +caption="Helm Releases dashboard with new release" +max-width="90%" +%} + + + +{:start="2"} +1. Click on the release and get information regarding its chart, its revisions, changed files and the resulting manifests. + +The build values that we defined in the `codefresh.yml` are shown here in a separate tab so it is very easy to +verify the correct parameters. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-helm/helm-values.png" +url="/images/quick-start/quick-start-helm/helm-values.png" +alt="Helm values" +caption="Helm values" +max-width="70%" +%} + +>Tip: + To view the services/pods/deployments that comprise the helm release, go to the [Kubernetes Services dashboard](https://g.codefresh.io/kubernetes/services/){:target="\_blank"}. + + +## Roll back a Helm release + + + +With Helm, you can rollback a Helm release to a previous version without actually re-running the pipeline. +Helm gives you easy rollbacks for free. If you make some commits in your project, Helm retains the same deployment and adds new revisions on it. The server part of Helm keeps a history of all releases, and knows the exact contents of each respective Helm package. 
+As the rollback creates a new revision, you can move backwards and forwards in time to any revision. + +1. In the Codefresh UI, expand Ops from the sidebar, and select **Helm Releases**. +1. Click on the release, and then select the **History** tab. +1. From the list of revisions, select one as the rollback target. + + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-helm/helm-rollback.png" +url="/images/quick-start/quick-start-helm/helm-rollback.png" +alt="Helm rollback" +caption="Helm rollback" +max-width="70%" +%} + + + +## Store a Helm chart + +Codefresh includes a [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/), available to all accounts. Like any other public Helm repository, you can store charts in this repository, and also manually deploy applications from it. + +For the quick start, we will modify the pipeline with the Helm deploy step you created in the previous task. +We use the same `helm` step, but instead of deploying the Helm package which is the default action, we define `push` as the action. The pipeline now stores the Helm chart in the internal repository. + +To store a Helm chart, you either need to import the shared configuration that defines the integrated Helm repository, or define the repository URL directly. +For the quick start, we will import the shared configuration. + + +### Before you begin +* [Create and run pipeline with a Helm step](#create-and-run-pipeline-with-a-helm-step) + +### How to + +1. In the Codefresh UI, expand Pipelines in the sidebar, and select **Pipelines**. +1. Select the pipeline with the Helm deploy step. +1. In the Workflow tab, click the **Variables** tab on the right. +1. From the Variables toolbar, click the context menu, and then select **Add Shared Configuration**. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-helm/helm-select-shared-configuration.png" +url="/images/getting-started/quick-start-helm/helm-select-shared-configuration.png" +alt="Add Shared Configuration option" +caption="Add Shared Configuration option" +max-width="70%" +%} + +{:start="5"} +1. From the list, select **CF_HELM_DEFAULT** which defines the configuration of the integrated Helm repository, and click **Add**. + + {% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-helm/import-helm-repo-conf.png" +url="/images/quick-start/quick-start-helm/import-helm-repo-conf.png" +alt="Helm settings" +caption="Import Helm repository configuration" +max-width="70%" +%} + +{:start="6"} +1. Go to the Inline YAML editor with the pipeline definition and modify the deploy step in your `codefresh.yml`: +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - package + - deploy +steps: + clone: + title: Cloning main repository... 
+      type: git-clone
+      arguments:
+        repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
+        revision: '${{CF_REVISION}}'
+      stage: checkout
+  BuildingDockerImage:
+    title: Building Docker Image
+    type: build
+    working_directory: ${{clone}}
+    arguments:
+      image_name: my-flask-app
+      tag: '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}'
+      dockerfile: Dockerfile
+    stage: package
+  deploy:
+    title: Storing Helm chart
+    type: helm
+    stage: deploy
+    working_directory: ./python-flask-sample-app
+    arguments:
+      action: push
+      chart_name: charts/python
+      helm_version: 3.0.2
+      kube_context: 'mydemoAkscluster@BizSpark Plus'
+{% endraw %}
+{% endhighlight %}
+
+  As you can see, the `action` argument has `push` as its value.
+
+{:start="7"}
+1. Click **Save** and then click **Run** twice to run the pipeline.
+1. View the pipeline build in the [Helm releases]({{site.baseurl}}/docs/new-helm/helm-releases-management/) dashboard.
+
+  {% include
+image.html
+lightbox="true"
+file="/images/quick-start/quick-start-helm/helm-only-store.png"
+url="/images/quick-start/quick-start-helm/helm-only-store.png"
+alt="Storing Helm chart"
+caption="Storing Helm chart"
+max-width="100%"
+%}
+
+{:start="8"}
+1. When the pipeline build completes execution, expand Artifacts from the sidebar, and select **Helm charts**.
+
+  {% include
+image.html
+lightbox="true"
+file="/images/quick-start/quick-start-helm/helm-repo.png"
+url="/images/quick-start/quick-start-helm/helm-repo.png"
+alt="Helm chart in repository"
+caption="Helm chart in repository"
+max-width="70%"
+%}
+
+
+> You can click the **Install** button to manually deploy the chart. In that case, Codefresh allows you to enter your own values and also to select your target cluster.
+
+
+
+You now know how to deploy a Helm release from Codefresh, view the release, and store the Helm chart in a repository.
+
+Continue with:
+[On-demand environment quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/on-demand-environments)
+
+
+## Read more on deployments with Helm
+[Codefresh built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/)
+[Using Helm in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/)
+[Helm pipeline example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/)
+[Codefresh pipeline definitions YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+
diff --git a/_docs/quick-start/ci-quickstart/on-demand-environments.md b/_docs/quick-start/ci-quickstart/on-demand-environments.md
new file mode 100644
index 000000000..4e07f5766
--- /dev/null
+++ b/_docs/quick-start/ci-quickstart/on-demand-environments.md
@@ -0,0 +1,112 @@
+---
+title: "On-demand environment quick start"
+description: "Code collaboration with Codefresh"
+group: quick-start
+sub_group: ci-quickstart
+toc: true
+
+---
+
+This quick start guides you through creating separate demo environments to test different features that developers are working on.
+
+Codefresh has the unique capability of launching Docker images in temporary test environments. These test environments
+are ephemeral, and are perfect for quick demos. Use them to quickly share a feature with a colleague or a customer.
+
+> Test environments are not intended to be used as QA (let alone production) environments.
+
+## Launch a Docker image using Codefresh
+
+Docker images play a central role in Codefresh.
+In the [CI pipeline quick start]({{site.baseurl}}/docs/quick-start/ci-quickstart/create-ci-pipeline/), you saw how to easily create a Docker image from your source code. + +In this quick start, we will take this one step further, and launch the resulting Docker image. +Codefresh has the unique capability of launching a Docker image (using [Docker Swarm](https://docs.docker.com/engine/swarm/) behind the scenes) on the same hosted environment that Codefresh itself runs on. +This means that with zero effort from your side, you can quickly inspect the status of your application using the Codefresh infrastructure. + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-test-pr/demo-stage.jpg" +url="/images/quick-start/quick-start-test-pr/demo-stage.jpg" +max-width="80%" +%} + + + +1. In the Codefresh UI, go to [Images](https://g.codefresh.io/2.0/images){:target="\_blank"}. +1. Select the image and click **Launch**. + + +1. As our sample application consists of only a single Docker image, select **standalone**. + Codefresh automatically knows which port should be exposed in the test environment, that is, which port of the Docker container should be made available for external connections. + Our sample application exposes its web interface at port 5000, but a random port is actually assigned for external connections. +1. Once the application is launched, Codefresh displays the run log. You will see the same messages that would appear if you executed the `docker run` command locally. + +{% include +image.html +lightbox="true" +file="/images/getting-started/quick-start-test-pr/launch-url.png" +url="/images/getting-started/quick-start-test-pr/launch-url.png" +alt="Start logs" +caption="Start logs (click image to enlarge)" +max-width="60%" +%} + +## Access the test environment + +Once launch is complete, Codefresh prints a dynamic URL for the deployed environment. +Now you have a demo environment created just for you! You can send this link with an email to a colleague to ask for feedback or to a customer to show progress. + + +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-test-pr/demo-environment.png" +url="/images/quick-start/quick-start-test-pr/demo-environment.png" +alt="Test environment" +caption="Test environment" +max-width="60%" +%} + +>If the environment is not functioning correctly for your own application, make sure that the port exposed by Codefresh in the **Launch settings** is the one that is actually used in your application as an HTTP endpoint. + +You can also view your on-demand environment in Codefresh, and get additional details on the environment. + +1. In the Codefresh UI, go to [Compositions](https://g.codefresh.io/compositions){:target="\_blank"}. +1. Click the **Running Compositions** tab. + You will see the test environment you created, and details such as: + * The branch from which this environment is created + * The Git commit that represents this environment + * The URL endpoint created by Codefresh + * On the right, action button that allow you to visit the environment directly, share the link on Slack, and most importantly, stop the environment, so that it doesn't count against your account. + It is a good practice to launch environments only when you need them and clean them up once you are done with them. 
+ +{% include +image.html +lightbox="true" +file="/images/quick-start/quick-start-test-pr/env-details.png" +url="/images/quick-start/quick-start-test-pr/env-details.png" +alt="Details for an active on-demand environment" +caption="Details for active on-demand environment" +max-width="70%" +%} + + + +## Read more +[Kubernetes deployment quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + diff --git a/_docs/quick-start/create-codefresh-account.md b/_docs/quick-start/create-codefresh-account.md new file mode 100644 index 000000000..510c73569 --- /dev/null +++ b/_docs/quick-start/create-codefresh-account.md @@ -0,0 +1,111 @@ +--- +title: "Create Codefresh account" +description: "Start working in Codefresh" +group: quick-start +toc: true +--- +Once you have installed Codefresh, the next step is to create an account in Codefresh. + +This quick start guides you through creating an account in Codefresh: + +After you select the IdP (identity provider), Codefresh requests permission to access your basic details, and for Git providers, to access your Git repositories. The Permissions window that is displayed differs according to the IdP selected. + +The permissions requested by Codefresh are needed in order to build and deploy your projects. + +Codefresh currently supports the following IdPs: +* GitHub +* Bitbucket +* GitLab +* Azure +* Google +* LDAP + +If you need an IdP that is not in the list, please [contact us](https://codefresh.io/contact-us/) with the details. + +>NOTES: + For Git repositories, the login method is less important, as you can access Git repositories through [Git integrations]({{site.baseurl}}/docs/integrations/git-providers/), regardless of your sign-up process. + + If you have multiple sign-up methods, as long as you use the same email address in all the sign-ups, Codefresh automatically redirects you to the account dashboard. + +1. Select the identity provider (IdP) to use: + 1. Go to the [Codefresh Sign Up page](https://g.codefresh.io/signup). + 1. Select the IdP for sign-up. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/select-identity-provider.png" +url="/images/administration/create-account/select-identity-provider.png" +alt="Codefresh sign-up page" +caption="Codefresh sign-up page" +max-width="40%" +%} + +{:start="2"} +1. Accept the permissions request for the selected IdP: + * For GitHub: To continue, click **Authorize codefresh-io**. + * For Bitbucket: To continue, click **Grant access**. + * For GitLab: To continue, click **Authorize**. + + Once you confirm the permissions for your Git provider, Codefresh automatically connects to your Git provider and fetches your basic account details, such as your email. + +{:start="3"} +1. Review the details for your new account, make the relevant changes, and click **NEXT**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-signup.png" +url="/images/administration/create-account/codefresh-signup.png" +alt="Codefresh account details" +caption="Codefresh account details" +max-width="40%" +%} + +{:start="4"} +1. Enter a name for your account, and click **NEXT**. 
+ +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-accountname.png" +url="/images/administration/create-account/codefresh-accountname.png" +alt="Codefresh account name" +caption="Codefresh account name" +max-width="40%" +%} + +{:start="5"} +1. Finally, answer the questions to personalize your account and click **FINISH**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-personalize.png" +url="/images/administration/create-account/codefresh-personalize.png" +alt="Codefresh personalize account" +caption="Codefresh personalize account" +max-width="40%" +%} + +Congratulations! Your new Codefresh account is now ready. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-dashboard.png" +url="/images/administration/create-account/codefresh-dashboard.png" +alt="Codefresh dashboard" +caption="Codefresh dashboard" +max-width="40%" +%} + + +## Related quick starts +[CI/CD quick starts]({{site.baseurl}}/docs/quick-start/ci-quick-start/) +[GitOps quick starts]({{site.baseurl}}/docs/quick-start/gitops-quick-start/) + + + + + diff --git a/_docs/getting-started/quick-start.md b/_docs/quick-start/gitops-quick-start.md similarity index 56% rename from _docs/getting-started/quick-start.md rename to _docs/quick-start/gitops-quick-start.md index 203c03d09..68a2a9dde 100644 --- a/_docs/getting-started/quick-start.md +++ b/_docs/quick-start/gitops-quick-start.md @@ -1,55 +1,53 @@ --- -title: "Quick start" +title: "GitOps quick starts" description: "" -group: getting-started +group: quick-start toc: true --- -Check out our quick start tutorial to get up and running in the Codefresh platform with hosted or hybrid runtimes. +Check out our GitOps quick starts to get up and running with GitOps in Codefresh. -The tutorial is divided into these sections: +The quick start guides you through: * Provisioning runtimes * Creating and deploying an application -* Triggering and creating a Delivery Pipeline -Each section indicates the runtime environment it is relevant to. -### Provision runtimes +## Provision runtimes Based on your deployment model, start by provisioning the hosted or hybrid runtime. Hosted and hybrid runtimes can co-exist with each other. -#### Hosted +### Hosted Hosted runtimes are hosted on a Codefresh cluster and managed by Codefresh. You need to provision your hosted runtime once for your account. -1. [Provision a hosted runtime]({{site.baseurl}}/docs/getting-started/quick-start/install-hosted) +1. [Provision a hosted runtime]({{site.baseurl}}/docs/quick-start/gitops-quickstart/install-hosted) Provision the hosted runtime with a single click, and complete the setup for your hosted environment. {::nomarkdown}
            {:/} -#### Hybrid -Hybrid runtimes: Hosted on a customer cluster and managed by the customer. You can provision multiple hybrid runtimes in the same account. +### Hybrid +Hybrid runtimes are hosted on a customer cluster and managed by the customer. You can provision multiple hybrid runtimes in the same account. -1. [Prepare for hosted runtime installation]({{site.baseurl}}/docs/getting-started/quick-start/verify-requirements) +1. [Prepare for hybrid runtime installation]({{site.baseurl}}/docs/quick-start/gitops-quickstart/verify-requirements) Verify your environment matches the requirements for installing Codefresh runtime. -1. [Install hybrid runtime]({{site.baseurl}}/docs/getting-started/quick-start/runtime) +1. [Install hybrid runtime]({{site.baseurl}}/docs/quick-start/gitops-quickstart/runtime) Install the Codefresh runtime by downloading the CLI, installing the runtime, and validate successful installation in the UI -### Deploy an application +## Deploy an application -1. [Create an application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-ui) +1. [Create an application]({{site.baseurl}}/docs/quick-start/gitops-quickstart/create-app-ui) Create the `codefresh-guestbook` application in the Codefresh UI. -1. [Create and commit resources for application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs) +1. [Create and commit resources for application]({{site.baseurl}}/docs/quick-start/gitops-quickstart/create-app-specs) Create rollout and service resources, and commit these resources to deploy the `codefresh-guestbook` application. -1. [Update the image tag for application]({{site.baseurl}}/docs/getting-started/quick-start/create-rollout) +1. [Update the image tag for application]({{site.baseurl}}/docs/quick-start/gitops-quickstart/create-rollout) Update the image for the `codefresh-guestbook` application to trigger a rollout. 
-### Trigger/create a Delivery Pipeline + diff --git a/_docs/getting-started/quick-start/create-app-specs.md b/_docs/quick-start/gitops-quickstart/create-app-specs.md similarity index 100% rename from _docs/getting-started/quick-start/create-app-specs.md rename to _docs/quick-start/gitops-quickstart/create-app-specs.md diff --git a/_docs/getting-started/quick-start/create-app-ui.md b/_docs/quick-start/gitops-quickstart/create-app-ui.md similarity index 100% rename from _docs/getting-started/quick-start/create-app-ui.md rename to _docs/quick-start/gitops-quickstart/create-app-ui.md diff --git a/_docs/getting-started/quick-start/create-github-action-ci.md b/_docs/quick-start/gitops-quickstart/create-github-action-ci.md similarity index 100% rename from _docs/getting-started/quick-start/create-github-action-ci.md rename to _docs/quick-start/gitops-quickstart/create-github-action-ci.md diff --git a/_docs/getting-started/quick-start/create-ci-pipeline.md b/_docs/quick-start/gitops-quickstart/create-gitops-ci-pipeline.md similarity index 100% rename from _docs/getting-started/quick-start/create-ci-pipeline.md rename to _docs/quick-start/gitops-quickstart/create-gitops-ci-pipeline.md diff --git a/_docs/getting-started/quick-start/create-rollout.md b/_docs/quick-start/gitops-quickstart/create-rollout.md similarity index 100% rename from _docs/getting-started/quick-start/create-rollout.md rename to _docs/quick-start/gitops-quickstart/create-rollout.md diff --git a/_docs/getting-started/quick-start/hello-world.md b/_docs/quick-start/gitops-quickstart/hello-world.md similarity index 100% rename from _docs/getting-started/quick-start/hello-world.md rename to _docs/quick-start/gitops-quickstart/hello-world.md diff --git a/_docs/getting-started/quick-start/install-hosted.md b/_docs/quick-start/gitops-quickstart/install-hosted.md similarity index 100% rename from _docs/getting-started/quick-start/install-hosted.md rename to _docs/quick-start/gitops-quickstart/install-hosted.md diff --git a/_docs/getting-started/quick-start/runtime.md b/_docs/quick-start/gitops-quickstart/runtime.md similarity index 100% rename from _docs/getting-started/quick-start/runtime.md rename to _docs/quick-start/gitops-quickstart/runtime.md diff --git a/_docs/getting-started/quick-start/verify-requirements.md b/_docs/quick-start/gitops-quickstart/verify-requirements.md similarity index 84% rename from _docs/getting-started/quick-start/verify-requirements.md rename to _docs/quick-start/gitops-quickstart/verify-requirements.md index 3fc48c3eb..fa8c46faa 100644 --- a/_docs/getting-started/quick-start/verify-requirements.md +++ b/_docs/quick-start/gitops-quickstart/verify-requirements.md @@ -8,7 +8,7 @@ toc: true **New installation** -If this is your first time installing Codefresh, review and confirm that your deployment environment conforms to the minimum requirements for hybrid runtime installation. Check the [system requirements]({{site.baseurl}}/docs/runtime/requirements). +If this is your first time installing GitOps in Codefresh, review and confirm that your deployment environment conforms to the minimum requirements for hybrid runtime installation. Check the [system requirements]({{site.baseurl}}/docs/installation/requirements). 
**Existing installation**

If you already have a hybrid runtime installation on your cluster, you have two options:
diff --git a/_docs/reference/behind-the-firewall.md b/_docs/reference/behind-the-firewall.md
new file mode 100644
index 000000000..b01ba1388
--- /dev/null
+++ b/_docs/reference/behind-the-firewall.md
@@ -0,0 +1,248 @@
+---
+title: "Runner installation behind firewalls"
+description: "Run Codefresh Pipelines in your own secure infrastructure"
+group: installation
+redirect_from:
+  - /docs/enterprise/behind-the-firewall/
+toc: true
+
+---
+
+As described in [installation options]({{site.baseurl}}/docs/installation/installation-options/), Codefresh offers CI/CD and GitOps installation environments, each with its own installation options.
+This article focuses on the CI/CD Hybrid installation option with the Codefresh Runner and its advantages.
+
+## Running Codefresh CI/CD in secure environments
+
+Codefresh CI/CD has an on-premises installation in which the Codefresh CI/CD platform is installed on the customer's premises. While
+this solution is very effective as far as security is concerned, it places a lot of overhead on the customer, as all updates
+and improvements made to the platform must also be transferred to the customer premises.
+
+Hybrid CI/CD places a Codefresh Runner within the customer premises, while the UI (and management platform) stays in the Codefresh SaaS.
+
+Here is the overall architecture:
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/behind-the-firewall/architecture.png"
+  url="/images/administration/behind-the-firewall/architecture.png"
+  alt="Codefresh Hybrid CI/CD behind the firewall"
+  caption="Codefresh Hybrid CI/CD behind the firewall"
+  max-width="100%"
+  %}
+
+This scenario offers several advantages.
+
+Regarding platform maintenance:
+
+ 1. Codefresh is responsible for the heavy lifting of platform maintenance, instead of the customer.
+ 1. Updates to the UI, build engine, integrations, etc. happen automatically, without any customer involvement.
+ 1. Actual builds run on the customer's premises under fully controlled conditions.
+ 1. The Codefresh Runner is fully automated. It handles volume claims and build scheduling on its own within the Kubernetes cluster in which it is placed.
+
+Regarding security of services:
+
+ 1. Pipelines can run in behind-the-firewall clusters with internal services.
+ 1. Pipelines can use integrations (such as Docker registries) that are private and secure.
+ 1. Source code never leaves the customer premises.
+
+Regarding firewall security:
+
+ 1. Communication between the Codefresh Runner and the Codefresh CI/CD platform is uni-directional and outgoing only: the Runner polls the Codefresh platform for jobs.
+ 1. Codefresh SaaS never connects to the customer network. No ports need to be open in the customer firewall for the Runner to work.
+ 1. The Codefresh Runner is fully open-sourced, so its code can be scrutinized by any stakeholder.
+
+
+
+## Using secure services in your CI pipelines
+
+After installing the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) on a private Kubernetes cluster in your infrastructure, all CI pipelines that run on that cluster have access to any other internal service that is network reachable.
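+
+For example, here is a minimal sketch of a pipeline step that runs integration tests against a database that is reachable only inside the private network. The image, hostname, and test commands below are placeholders for whatever your own project uses; any service the Runner's cluster can resolve works the same way:
+
+`YAML`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  RunIntegrationTests:
+    title: Running tests against an internal database
+    image: python:3.9
+    environment:
+      # internal-only hostname, resolvable from the cluster where the Runner is installed
+      - DATABASE_HOST=postgres.internal.example.com
+      - DATABASE_PORT=5432
+    commands:
+      - pip install -r requirements.txt
+      - python -m pytest tests/integration
+{% endraw %}
+{% endhighlight %}
+
+Because the step executes inside your own cluster, no extra networking or firewall configuration is needed for it to reach the internal service.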
+
+You can easily create CI pipelines that:
+
+ * Use databases internal to the company
+ * Run integration tests against services internal to the company
+ * Launch [compositions]({{site.baseurl}}/docs/pipelines/steps/composition/) that communicate with other secure services
+ * Upload and download artifacts from a private artifact repository (e.g., Nexus or Artifactory)
+ * Deploy to any other cluster accessible in the secure network
+ * Create infrastructure such as machines, load balancers, auto-scaling groups, etc.
+
+ All of these CI pipelines work out of the box without extra configuration. In all cases,
+ all data stays within the private local network and never exits the firewall.
+
+ >Notice that [long-running compositions]({{site.baseurl}}/docs/pipelines/steps/composition/) (preview test environments) are not yet available via the Codefresh build runner.
+
+
+
+### Checking out code from a private Git repository
+
+To check out code from your private Git repository, you first need to connect it to Codefresh via [Git integrations]({{site.baseurl}}/docs/integrations/git-providers/). Once you define your Git provider as *on premise*, you must also
+mark it as *behind the firewall*:
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/behind-the-firewall/behind-the-firewall-toggle.png"
+  url="/images/administration/behind-the-firewall/behind-the-firewall-toggle.png"
+  alt="Behind the firewall toggle"
+  caption="Behind the firewall toggle"
+  max-width="100%"
+  %}
+
+Once you do that, save your provider and make sure that it has the correct tags. The name you used for the Git provider is also used in the pipeline. You cannot "test the connection", because
+the Codefresh SaaS doesn't have access to your on-premises Git repository.
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/behind-the-firewall/behind-the-firewall-tag.png"
+  url="/images/administration/behind-the-firewall/behind-the-firewall-tag.png"
+  alt="Behind the firewall tags"
+  caption="Behind the firewall tags"
+  max-width="100%"
+  %}
+
+To check out code, just use a [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) like any other clone operation.
+The only thing to remember is that the Git URL must be fully qualified. You need to [create the pipeline]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-creation-modes) on its own from the *Pipelines* section of the sidebar (instead of adding a Git repository to Codefresh).
+
+
+
+`YAML`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  main_clone:
+    type: git-clone
+    description: Step description
+    repo: https://github-internal.example.com/my-username/my-app
+    git: my-internal-git-provider
+  BuildingDockerImage:
+    title: Building Docker Image
+    type: build
+    image_name: my-image
+    tag: '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}'
+    dockerfile: Dockerfile
+{% endraw %}
+{% endhighlight %}
+
+Once you trigger the CI pipeline, the Codefresh builder communicates with your private Git instance and checks out the code.
+
+>Note that currently there is a limitation on the location of the `codefresh.yml` file. Only the [inline mode]({{site.baseurl}}/docs/pipelines/pipelines/#writing-codefresh-yml-in-the-gui) is supported. Soon we will allow loading the pipeline from the Git repository itself.
+
+You can also use a [network proxy]({{site.baseurl}}/docs/pipelines/steps/git-clone/#using-git-behind-a-proxy) for the Git clone step.
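+
+If the clone has to go through a proxy to reach your internal Git server, the linked article describes the exact setup; the gist is typically defining the standard proxy variables (`HTTP_PROXY`/`HTTPS_PROXY`) for the pipeline. A sketch of what you might add in the pipeline's **Variables** tab, with a placeholder proxy address:
+
+{% highlight text %}
+HTTP_PROXY=http://proxy.internal.example.com:3128
+HTTPS_PROXY=http://proxy.internal.example.com:3128
+NO_PROXY=localhost,127.0.0.1
+{% endhighlight %}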
+
+#### Adding triggers from private Git repositories
+
+
+In the previous section, we saw how a CI pipeline can check out code from an internal Git repository. We also need to set up a trigger,
+so that every time a commit or any other supported event occurs, the Codefresh CI pipeline is triggered automatically.
+
+If you have installed the [optional app-proxy]({{site.baseurl}}/docs/installation/codefresh-runner/#optional-installation-of-the-app-proxy), you can add a trigger exactly as in the SaaS version of Codefresh, using only the Codefresh UI.
+
+If you haven't installed the app-proxy, then adding a Git trigger is a two-step process:
+
+1. First we set up a webhook endpoint in Codefresh.
+1. Then we create the webhook call on the side of the Git provider.
+
+> To support triggers based on PR (Pull Request) events, it is mandatory to install `app-proxy`.
+
+For the Codefresh side, follow the usual instructions for creating a [basic Git trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/).
+
+Once you select your Git provider, you need to manually enter your username and the repository for which you wish to trigger builds.
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/behind-the-firewall/enter-repo-details.png"
+  url="/images/administration/behind-the-firewall/enter-repo-details.png"
+  alt="Entering repository details"
+  caption="Entering repository details"
+  max-width="60%"
+  %}
+
+All other details (Git events, branch naming, monorepo pattern, etc.) are the same as for normal SaaS Git providers.
+Once that is done, Codefresh shows you the webhook endpoint along with a secret for triggering this pipeline. Note them down.
+
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/behind-the-firewall/codefresh-webhook.png"
+  url="/images/administration/behind-the-firewall/codefresh-webhook.png"
+  alt="Codefresh webhook details"
+  caption="Codefresh webhook details"
+  max-width="60%"
+  %}
+
+This concludes the setup on the Codefresh side. The final step is to create a webhook call on the side of your Git provider.
+The instructions differ per Git provider:
+
+* [GitHub webhooks](https://developer.github.com/webhooks/)
+* [GitLab webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html)
+* [Stash webhooks](https://confluence.atlassian.com/bitbucketserver/managing-webhooks-in-bitbucket-server-938025878.html)
+
+In all cases, make sure that the payload is JSON, because this is what Codefresh expects.
+
+* For GitHub, the events monitored should be `Pull requests` and `Pushes`.
+* For GitLab, the events monitored should be `Push events`, `Tag push events` and `Merge request events`.
+
+After the setup is finished, the Codefresh pipeline is executed every time a Git event happens.
+
+### Accessing an internal Docker registry
+
+To access an internal registry, just follow the instructions for [adding registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). Like Git repositories,
+you need to mark the Docker registry as *Behind the firewall*.
+
+Once that is done, use the [push step]({{site.baseurl}}/docs/codefresh-yaml/steps/push/) as usual, with the name you gave to the registry during the integration setup.
+ + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + gitClone: + type: git-clone + description: Step description + repo: https://github-internal.example.com/my-username/my-app + git: my-internal-git-repo + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: my-image + dockerfile: Dockerfile + PushingDockerImage: + title: Pushing a docker image + type: push + candidate: '${{BuildingDockerImage}}' + tag: '${{CF_BRANCH}}' + registry: my-internal-docker-registry +{% endraw %} +{% endhighlight %} + + +### Deploying to an internal Kubernetes cluster + +To connect a cluster that is behind the firewall follow the [connecting cluster guide]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/), paying attention to the following two points: + +1. Your cluster should be added as a [Custom provider]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/#adding-any-other-cluster-type-not-dependent-on-any-provider) +1. You need to mark the cluster as internal by using the toggle switch. + + + + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/cluster-behind-firewall.png" + url="/images/administration/behind-the-firewall/cluster-behind-firewall.png" + alt="Marking a Kubernetes cluster as internal" + caption="Marking a Kubernetes cluster as internal" + max-width="60%" + %} + +The cluster where the runner works on should have network connectivity with the cluster you wish to deploy to. + +>Notice that the service account used in the cluster configuration is completely independent from the privileges granted to the Codefresh build runner. The privileges needed by the runner are only used to launch Codefresh pipelines within your cluster. The Service account used in the "custom provider" setting should have the needed privileges for deployment. + +Once your cluster is connected you can use any of the familiar deployment methods such as the [dedicated deploy step]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) or [custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/). + +## Related articles +[Codefresh installation options]({{site.baseurl}}/docs/installation/installation-options/) +[Google marketplace integration]({{site.baseurl}}/docs/integrations/ci-integrations/google-marketplace/) +[Managing your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) diff --git a/_docs/reporting/dora-metrics.md b/_docs/reporting/dora-metrics.md deleted file mode 100644 index 229b2d7be..000000000 --- a/_docs/reporting/dora-metrics.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "DORA metrics" -description: "Get insights into your deployments" -group: reporting -toc: true ---- - -DevOps is a collaboration paradigm that is sometimes mistaken for being too abstract or too generic. In an effort to quantify the benefits of adopting DevOps, [Dora Research](https://www.devops-research.com/research.html#capabilities){:target="\_blank"} (acquired by Google in 2018), has introduced four key metrics that define specific goals for improving the software lifecycle in companies interested in adopting DevOps. 
- -DORA measures these metrics: - -* Deployment Frequency: How often an organization successfully releases to production -* Lead Time for Changes: The length of time for a commit to be deployed into production -* Change Failure Rate: The percentage of deployments causing a failure in production -* Time to Restore Service: The length of time for an organization to recover from a failure in production - -[Read more on DORA](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance){:target="\_blank"}. - -### DORA metrics in Codefresh - -Monitoring DORA metrics can help identify delivery issues in your organization by detecting bottlenecks among teams, and help to optimize your workflows, at technical or organizational levels. -Codefresh offers support for DORA metrics out of the box. - -* In the Codefresh UI, go to [DORA metrics](https://g.codefresh.io/2.0/dora-dashboard/dora){:target="\_blank"}. - -{% include -image.html -lightbox="true" -file="/images/reporting/dora-metrics.png" -url="/images/reporting/dora-metrics.png" -alt="DORA metrics report" -caption="DORA metrics report" -max-width="100%" -%} - -### Filters - -Use filters to define the exact subset of applications you are interested in. All filters support auto-complete and multi-select. -More than one option within the same filter type has an OR relationship. More than one filter type when defined share an AND relationship. - -* Runtimes: Show metrics for applications from selected runtimes -* Clusters: Show metrics for applications deployed to selected clusters -* Applications: Show metrics for selected applications. -* Time: Show metrics from application for a specific time period - -> When no filters are defined, all metrics are shown for the last 90 days. - -### Metrics for favorite applications -If you have [starred applications as favorites]({{site.baseurl}}/docs/deployment/applications-dashboard/#applications-dashboard-information) in the Applications dashboard, clicking {::nomarkdown}{:/} in DORA metrics, displays metrics only for those applications. - - -### Metric totals -As the title indicates, the Totals bar shows the total numbers, based on the filters defined, or for the last 90 days, if there are no filters: - -* Deployments -* Rollbacks -* Commits/Pull Requests -* Failure Rate: The number of failed deployments divided by the total number of deployments - -### Metric graphs -The metric graphs show performance for the DORA metrics, again based on the filters defined, or for the last 90 days, if there are no filters. - -In addition, you can select the granularity for each graph: - -* Daily -* Weekly -* Monthly - - - -**Deployment Frequency** - The frequency of deployments of any kind, successful or failed. Deployment is considered an Argo CD sync where there was a change. The X-axis charts the time based on the granularity, and the Y-axis charts the number of deployments. The number shown on the top right is the average deployment frequency based on granularity. - -**Change failure rate** - The failure or rollback rate in percentage for deployments. Derived by dividing the failed/rollback deployments by the total number of deployments. Failed deployments are those Argo CD deployments that lead to a sync state of Degraded. The X-axis charts the time based on the granularity, and the Y-axis charts the failure rate. The number shown on the top right is the average failure rate based on granularity, and therefore may not be equal to the Total Failure Rate. 
- -**Lead Time for Changes** - The average number of days from the first commit for a pull request until the deployment date for the same pull request. The X-axis charts the time based on the granularity, and the Y-axis charts the time in minutes until the deployment. The number shown on the top right is the average number of days for a commit to reach production. - -**Time to Restore Service** - The average number of hours taken for the status to return to Healthy after changing to Degraded or Unhealthy. The X-axis charts the time based on the granularity, and the Y-axis charts the time in hours. The number shown on the top right is the average number of hours between the previous deployment and rollback for the same application. - -## What to read next -[Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/) -[Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/) - diff --git a/_docs/runtime/download-runtime-logs.md b/_docs/runtime/download-runtime-logs.md deleted file mode 100644 index ca6cf8ff9..000000000 --- a/_docs/runtime/download-runtime-logs.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "View/download runtime logs" -description: "" -group: runtime -toc: true ---- - -Logs are available for completed runtimes, both for the runtime and for individual runtime components. Download runtime log files for offline viewing and analysis, or view online logs for a runtime component, and download if needed for offline analysis. Online logs support free-text search, search-result navigation, and line-warp for enhanced readability. - -Log files include events from the date of the application launch, with the newest events listed first. - - -### Download logs for runtimes -Download the log file for a runtime. The runtime log is downloaded as a `.tar.gz` file, which contains the individual log files for each runtime component. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime for which to download logs. -1. From the list of **Additional Actions**, select **Download All Logs**. - The log file is downloaded to the Downloads folder or the folder designated for downloads, with the filename, `.tar.gz`. For example, `codefreshv2-production2.tar.gz`. - - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-download-all.png" - url="/images/runtime/runtime-logs-download-all.png" - alt="Download logs for selected runtime" - caption="Download logs for selected runtime" - max-width="40%" -%} - - -{:start="4"} -1. To view the log files of the individual components, unzip the file. - Here is an example of the folder with the individual logs. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-folder-view.png" - url="/images/runtime/runtime-logs-folder-view.png" - alt="Individual log files in folder" - caption="Individual log files in folder" - max-width="50%" -%} - -{:start="5"} -1. Open a log file with the text editor of your choice. - - -### View/download logs for runtime components -View online logs for any runtime component, and if needed, download the log file for offline viewing and analysis. - -Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events from the application launch to the date and time of download. - -1. 
In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime. -1. Select the runtime component and then select **View Logs**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-view-component.png" - url="/images/runtime/runtime-logs-view-component.png" - alt="View log option for individual runtime component" - caption="View log option for individual runtime component" - max-width="40%" -%} - - -{:start="4"} -1. Do the following: - * Search by free-text for any string, and click the next and previous buttons to navigate between the search results. - * To switch on line-wrap for readability, click **Wrap**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-screen-view.png" - url="/images/runtime/runtime-logs-screen-view.png" - alt="Runtime component log example" - caption="Runtime component log example" - max-width="50%" -%} - -{:start="5"} -1. To download the log, click **Download**. - The file is downloaded as `.log`. - -### Related information -[Manage Git Sources]({{site.baseurl}}/docs/runtime/git-sources/#viewdownload-logs-for-a-git-source) \ No newline at end of file diff --git a/_docs/runtime/installation-options.md b/_docs/runtime/installation-options.md deleted file mode 100644 index e75e2058f..000000000 --- a/_docs/runtime/installation-options.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: "Installation environments" -description: "" -group: runtime -toc: true ---- - -Codefresh supports two installation environments: - - -* **Hosted** environments (Beta), with Argo CD installed in the Codefresh cluster. - The runtime is installed and provisioned in a Codefresh cluster, and managed by Codefresh. - Hosted enviroments are full-cloud environments, where all updates and improvements are managed by Codefresh, with zero-maintenance overhead for you as the customer. Currently, you can add one hosted runtime per account. - For the architecture illustration, see [Hosted runtime architecture]({{site.baseurl}}/docs/getting-started/architecture/#hosted-runtime-architecture). - - -{% include - image.html - lightbox="true" - file="/images/runtime/intro-hosted-hosted-initial-view.png" - url="/images/runtime/intro-hosted-hosted-initial-view.png" - alt="Hosted runtime setup" - caption="Hosted runtime setup" - max-width="80%" -%} - - For more information on how to set up the hosted environment, including provisioning hosted runtimes, see [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -* **Hybrid** environments, with Argo CD installed in the customer's cluster. - The runtime is installed in the customer's cluster, and managed by the customer. - Hybrid environments are optimal for organizations that want to manage CI/CD operations within their premises, or have other security constraints. Hybrid installations strike the perfect balance between security, flexibility, and ease of use. Codefresh maintains and manages most aspects of the platform, apart from installing and upgrading runtimes which are managed by the customer. 
- - -{% include - image.html - lightbox="true" - file="/images/runtime/runtime-list-view.png" - url="/images/runtime/runtime-list-view.png" - alt="Runtime List View" - caption="Runtime List View" - max-width="70%" -%} - - For more information on hybrid environments, see [Hybrid runtime requirements]({{site.baseurl}}/docs/runtime/requirements/) and [Installing hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/). - - - -#### Git provider repos -Codefresh Runtime creates three repositories in your organization's Git provider account: - -* Codefresh runtime installation repository -* Codefresh Git Sources -* Codefresh shared configuration repository - - - -### Hosted vs. Hybrid environments - -The table below highlights the main differences between hosted and hybrid environments. - -{: .table .table-bordered .table-hover} -| Functionality |Feature | Hosted | Hybrid | -| -------------- | -------------- |--------------- | --------------- | -| Runtime | Installation | Provisioned by Codefresh | Provisioned by customer | -| | Runtime cluster | Managed by Codefresh | Managed by customer | -| | Number per account | One runtime | Multiple runtimes | -| | External cluster | Managed by customer | Managed by customer | -| | Upgrade | Managed by Codefresh | Managed by customer | -| | Uninstall | Managed by customer | Managed by customer | -| Argo CD | | Codefresh cluster | Customer cluster | -| CI Ops | Delivery Pipelines |Not supported | Supported | -| |Workflows | Not supported | Supported | -| |Workflow Templates | Not supported | Supported | -| CD Ops |Applications | Supported | Supported | -| |Image enrichment | Supported | Supported | -| | Rollouts | Supported | Supported | -|Integrations | | Supported | Supported | -|Dashboards |Home Analytics | Hosted runtime and deployments|Runtimes, deployments, Delivery Pipelines | -| |DORA metrics | Supported |Supported | -| |Applications | Supported |Supported | - -### Related articles -[Architecture]({{site.baseurl}}/docs/getting-started/architecture/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration) - diff --git a/_docs/runtime/installation.md b/_docs/runtime/installation.md deleted file mode 100644 index 440122104..000000000 --- a/_docs/runtime/installation.md +++ /dev/null @@ -1,535 +0,0 @@ ---- -title: "Install hybrid runtimes" -description: "" -group: runtime -toc: true ---- - -If you have a hybrid environment, you can provision one or more hybrid runtimes in your Codefresh account. - -> If you have Hosted GitOps, to provision a hosted runtime, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -**Git providers and runtimes** -Your Codefresh account is always linked to a specific Git provider. This is the Git provider you select on installing the first runtime, either hybrid or hosted, in your Codefresh account. All the hybrid runtimes you install in the same account use the same Git provider. -If Bitbucket Server is your Git provider, you must also select the specific server instance to associate with the runtime. - ->To change the Git provider for your Codefresh account after installation, contact Codefresh support. - - -**Hybrid runtime** - The hybrid runtime comprises Argo CD components and Codefresh-specific components. 
The Argo CD components are derived from a fork of the Argo ecosystem, and do not correspond to the open-source versions available. - -There are two parts to installing a hybrid runtime: - -1. Installing the Codefresh CLI -2. Installing the hybrid runtime from the CLI, either through the CLI wizard or via silent installation through the installation flags. - The hybrid runtime is installed in a specific namespace on your cluster. You can install more runtimes on different clusters in your deployment. - Every hybrid runtime installation makes commits to three Git repos: - * Runtime install repo: The installation repo that manages the hybrid runtime itself with Argo CD. If the repo URL does not exist, it is automatically created during runtime installation. - * Git Source repo: Created automatically during runtime installation. The repo where you store manifests for pipelines and applications. See [Git Sources]({{site.baseurl}}/docs/runtime/git-sources). - * Shared configuration repo: Created for the first runtime in a user account. The repo stores configuration manifests for account-level resources and is shared with other runtimes in the same account. See [Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration). - - -See also [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture). - -{::nomarkdown} -
            -{:/} - -### Hybrid runtime installation flags -This section describes the required and optional flags to install a hybrid runtime. -For documentation purposes, the flags are grouped into: -* Runtime flags, relating to runtime, cluster, and namespace requirements -* Ingress controller flags, relating to ingress controller requirements -* Git provider flags -* Codefresh resource flags - -{::nomarkdown} -
            -{:/} - -#### Runtime flags - -**Runtime name** -Required. -The runtime name must start with a lower-case character, and can include up to 62 lower-case characters and numbers. -* CLI wizard: Add when prompted. -* Silent install: Add the `--runtime` flag and define the runtime name. - -**Namespace resource labels** -Optional. -The label of the namespace resource to which you are installing the hybrid runtime. Labels are required to identify the networks that need access during installation, as is the case when using service meshes such as Istio, for example. - -* CLI wizard and Silent install: Add the `--namespace-labels` flag, and define the labels in `key=value` format. Separate multiple labels with `commas`. - -**Kube context** -Required. -The cluster defined as the default for `kubectl`. If you have more than one Kube context, the current context is selected by default. - -* CLI wizard: Select the Kube context from the list displayed. -* Silent install: Explicitly specify the Kube context with the `--context` flag. - -**Shared configuration repository** -The Git repository per runtime account with shared configuration manifests. -* CLI wizard and Silent install: Add the `--shared-config-repo` flag and define the path to the shared repo. - -{::nomarkdown} -
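The following sketch shows how these runtime flags might be combined in a silent installation. The runtime name, kube context, labels, and shared configuration repo URL are placeholder values, and the Git provider flags described later in this article are still required.

```shell
# Illustrative values only; replace each one with your own settings.
cf runtime install \
  --runtime codefresh-prod \
  --context my-kube-context \
  --namespace-labels istio-injection=enabled,team=platform \
  --shared-config-repo https://github.com/my-org/codefresh-shared-config.git \
  --silent
```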
            -{:/} - -#### Ingress-less flags -These flags are required to install the runtime without an ingress controller. - -**Access mode** -Required. - -The access mode for ingress-less runtimes, which must be set to tunnel mode. - - -* CLI wizard and Silent install: Add the flag, `--access-mode`, and define `tunnel` as the value. - - -**IP allowlist** - -Optional. - -The allowed list of IPs from which to forward requests to the internal customer cluster for ingress-less runtime installations. The allowlist can include IPv4 and IPv6 addresses, with/without subnet and subnet masks. Multiple IPs must be separated by commas. - -When omitted, all incoming requests are authenticated regardless of the IPs from which they originated. - -* CLI wizard and Silent install: Add the `--ips-allow-list` flag, followed by the IP address, or list of comma-separated IPs to define more than one. For example, `--ips-allow-list 77.126.94.70/16,192.168.0.0` - -{::nomarkdown} -
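As a sketch, a tunnel-based (ingress-less) installation adds the two flags above to the install command. The allowlist below reuses the example values from this section, and the other required flags (runtime name, repo, token) are omitted for brevity.

```shell
# Tunnel mode bypasses the ingress controller; the allowlist entries are examples only.
cf runtime install \
  --access-mode tunnel \
  --ips-allow-list 77.126.94.70/16,192.168.0.0 \
  --silent
```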
            -{:/} - -#### Ingress controller flags - - -**Skip ingress** -Required, if you are using an unsupported ingress controller. -For unsupported ingress controllers, bypass installing ingress resources with the `--skip-ingress` flag. -In this case, after completing the installation, manually configure the cluster's routing service, and create and register Git integrations. See the last step in [Install the hybrid runtime](#install-the-hybrid-runtime). - -**Ingress class** -Required. - -* CLI wizard: Select the ingress class for runtime installation from the list displayed. -* Silent install: Explicitly specify the ingress class through the `--ingress-class` flag. Otherwise, runtime installation fails. - -**Ingress host** -Required. -The IP address or host name of the ingress controller component. - -* CLI wizard: Automatically selects and displays the host, either from the cluster or the ingress controller associated with the **Ingress class**. -* Silent install: Add the `--ingress-host` flag. If a value is not provided, takes the host from the ingress controller associated with the **Ingress class**. - > Important: For AWS ALB, the ingress host is created post-installation. However, when prompted, add the domain name you will create in `Route 53` as the ingress host. - -**Insecure ingress hosts** -TLS certificates for the ingress host: -If the ingress host does not have a valid TLS certificate, you can continue with the installation in insecure mode, which disables certificate validation. - -* CLI wizard: Automatically detects and prompts you to confirm continuing the installation in insecure mode. -* Silent install: To continue with the installation in insecure mode, add the `--insecure-ingress-host` flag. - -**Internal ingress host** -Optional. -Enforce separation between internal (app-proxy) and external (webhook) communication by adding an internal ingress host for the app-proxy service in the internal network. -For both CLI wizard and Silent install: - -* For new runtime installations, add the `--internal-ingress-host` flag pointing to the ingress host for `app-proxy`. -* For existing installations, commit changes to the installation repository by modifying the `app-proxy ingress` and `.yaml` - See [(Optional) Internal ingress host configuration for existing hybrid runtimes](#optional-internal-ingress-host-configuration-for-existing-hybrid-runtimes). - -{::nomarkdown} -
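For illustration, assuming an NGINX ingress class and placeholder host names, the ingress-related flags could be passed as follows; adjust the class and hosts to match your cluster.

```shell
# Placeholder hosts; replace with the DNS names served by your ingress controller.
cf runtime install \
  --ingress-class nginx \
  --ingress-host https://runtime.example.com \
  --internal-ingress-host https://runtime-internal.example.com \
  --silent
```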
            -{:/} - - - -#### Git provider and repo flags -The Git provider defined for the runtime. - ->Because Codefresh creates a [shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) for the runtimes in your account, the Git provider defined for the first runtime you install in your account is used for all the other runtimes in the same account. - -You can define any of the following Git providers: -* GitHub: - * [GitHub](#github) (the default Git provider) - * [GitHub Enterprise](#github-enterprise) -* GitLab: - * [GitLab Cloud](#gitlab-cloud) - * [GitLab Server](#gitlab-server) -* Bitbucket: - * [Bitbucket Cloud](#bitbucket-cloud) - * [Bitbucket Server](#bitbucket-server) - -{::nomarkdown} -
            -{:/} - - - -##### GitHub -GitHub is the default Git provider for hybrid runtimes. Being the default provider, for both the CLI wizard and Silent install, you need to provide only the repository URL and the Git runtime token. - -> For the required scopes, see [GitHub and GitHub Enterprise runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes). - -`--repo --git-token ` - -where: -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. Copy the clone URL from your GitHub website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}). - If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during runtime installation. - - Repo URL format: - `https://github.com//reponame>.git[/subdirectory][?ref=branch]` - where: - * `/` is your username or organization name, followed by the name of the repo, identical to the HTTPS clone URL. For example, `https://github.com/nr-codefresh/codefresh.io.git`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod` -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)). - -{::nomarkdown} -
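Putting the GitHub flags together, a hypothetical silent installation could look like the following; the repository URL reuses the example above, and the token value is a placeholder.

```shell
# GitHub is the default provider, so only the repo URL and runtime token are required.
cf runtime install \
  --repo https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod \
  --git-token ghp_examplePersonalAccessToken \
  --silent
```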
            -{:/} - -##### GitHub Enterprise - -> For the required scopes, see [GitHub and GitHub Enterprise runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes). - - -`--enable-git-providers --provider github --repo --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider github` (required), defines GitHub Enterprise as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitHub Enterprise website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}). - If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during runtime installation. - Repo URL format: - - `https://ghe-trial.devops.cf-cd.com//.git[/subdirectory][?ref=branch]` - where: - * `/` is your username or organization name, followed by the name of the repo. For example, `codefresh-io/codefresh.io.git`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://ghe-trial.devops.cf-cd.com/codefresh-io/codefresh.io.git/runtimes/defs?ref=codefresh-prod` -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)). - - -{::nomarkdown} -
            -{:/} - -##### GitLab Cloud -> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes). - - -`--enable-git-providers --provider gitlab --repo --git-token ` - -where: -* `--enable-git-providers`(required), indicates that you are not using the default Git provider for the runtime. -* `--provider gitlab` (required), defines GitLab Cloud as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git project for the runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitLab website. - If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during runtime installation. - - > Important: You must create the group with access to the project prior to the installation. - - Repo URL format: - - `https://gitlab.com//.git[/subdirectory][?ref=branch]` - where: - * `` is either your username, or if your project is within a group, the front-slash separated path to the project. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy) - * `` is the name of the project. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Examples: - `https://gitlab.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner) - - `https://gitlab.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy) - -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)). - - -{::nomarkdown} -
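As an illustration, here are the GitLab Cloud flags combined into one silent-install command; the project URL reuses the example above, and the token is a placeholder.

```shell
# --provider gitlab overrides the default (GitHub) provider for the runtime and account.
cf runtime install \
  --enable-git-providers \
  --provider gitlab \
  --repo https://gitlab.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod \
  --git-token glpat-exampleTokenValue \
  --silent
```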
            -{:/} - - - -##### GitLab Server - -> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes). - -`--enable-git-providers --provider gitlab --repo --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider gitlab` (required), defines GitLab Server as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. - If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh creates the project during runtime installation. - - > Important: You must create the group with access to the project prior to the installation. - - Repo URL format: - `https://gitlab-onprem.devops.cf-cd.com//.git[/subdirectory][?ref=branch]` - where: - * `` is your username, or if the project is within a group or groups, the name of the group. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy) - * `` is the name of the project. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Examples: - `https://gitlab-onprem.devops.cf-cd.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner) - - `https://gitlab-onprem.devops.cf-cd.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy) - -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)). - - -{::nomarkdown} -
            -{:/} - -##### Bitbucket Cloud -> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes). - - -`--enable-git-providers --provider bitbucket --repo --git-user --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider bitbucket` (required), defines Bitbucket Cloud as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. - If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh creates the project during runtime installation. - >Important: Remove the username, including @ from the copied URL. - - Repo URL format: - - `https://bitbucket.org.git[/subdirectory][?ref=branch]` - where: - * `` is your workspace ID. For example, `nr-codefresh`. - * `` is the name of the repository. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://bitbucket.org/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` -* `--git-user ` (required), is your username for the Bitbucket Cloud account. -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)). - - -{::nomarkdown} -
            -{:/} - -##### Bitbucket Server - -> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes). - - -`--enable-git-providers --provider bitbucket-server --repo --git-user --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider bitbucket-server` (required), defines Bitbucket Server as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. - If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh then creates the project during runtime installation. - >Important: Remove the username, including @ from the copied URL. - - Repo URL format: - - `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm//.git[/subdirectory][?ref=branch]` - where: - * `` is your username or organization name. For example, `codefresh-io`. - * `` is the name of the repo. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/codefresh-io/codefresh.git/runtimes/defs?ref=codefresh-prod` -* `--git-user ` (required), is your username for the Bitbucket Server account. -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)). - -{::nomarkdown} -
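For example, a hypothetical Bitbucket Server installation might pass the flags as shown below; note that the provider value is `bitbucket-server`, and the user name and token are placeholders.

```shell
# Bitbucket Server requires --git-user in addition to the runtime token.
cf runtime install \
  --enable-git-providers \
  --provider bitbucket-server \
  --repo https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/codefresh-io/codefresh.git/runtimes/defs?ref=codefresh-prod \
  --git-user codefresh-io \
  --git-token exampleTokenValue \
  --silent
```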

            -{:/} - -#### Codefresh resource flags -**Codefresh demo resources** -Optional. -Install demo pipelines to use as a starting point to create your own pipelines. We recommend installing the demo resources as these are used in our quick start tutorials. - -* Silent install: Add the `--demo-resources` flag, and define its value as `true` (default), or `false`. For example, `--demo-resources=true` - -**Insecure flag** -For _on-premises installations_, if the Ingress controller does not have a valid SSL certificate, to continue with the installation, add the `--insecure` flag to the installation command. - -{::nomarkdown} -

            -{:/} - - -### Install the Codefresh CLI - -Install the Codefresh CLI using the option that best suits you: `curl`, `brew`, or standard download. -If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation. - -{::nomarkdown} -

            -{:/} - -### Install the hybrid runtime - -**Before you begin** -* Make sure you meet the [minimum requirements]({{site.baseurl}}/docs/runtime/requirements/#minimum-requirements) for runtime installation -* Make sure you have a [runtime token with the required scopes from your Git provider]({{site.baseurl}}/docs/reference/git-tokens) -* [Download or upgrade to the latest version of the CLI]({{site.baseurl}}/docs/clients/csdp-cli/#upgrade-codefresh-cli) -* Review [Hybrid runtime installation flags](#hybrid-runtime-installation-flags) -* Make sure your ingress controller is configured correctly: - * [Ambassador ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#ambassador-ingress-configuration) - * [AWS ALB ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#alb-aws-ingress-configuration) - * [Istio ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#istio-ingress-configuration) - * [NGINX Enterprise ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-enterprise-ingress-configuration) - * [NGINX Community ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-community-version-ingress-configuration) - * [Traefik ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#traefik-ingress-configuration) - - -{::nomarkdown} -
            -{:/} - -**How to** - -1. Do one of the following: - * If this is your first hybrid runtime installation, in the Welcome page, select **+ Install Runtime**. - * If you have provisioned a hybrid runtime, to provision additional runtimes, in the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. Click **+ Add Runtimes**, and then select **Hybrid Runtimes**. -1. Do one of the following: - * CLI wizard: Run `cf runtime install`, and follow the prompts to enter the required values. - * Silent install: Pass the required flags in the install command: - `cf runtime install --repo --git-token --silent` - For the list of flags, see [Hybrid runtime installation flags](#hybrid-runtime-installation-flags). -1. If relevant, complete the configuration for these ingress controllers: - * [ALB AWS: Alias DNS record in route53 to load balancer]({{site.baseurl}}/docs/runtime/requirements/#alias-dns-record-in-route53-to-load-balancer) - * [Istio: Configure cluster routing service]({{site.baseurl}}/docs/runtime/requirements/#cluster-routing-service) - * [NGINX Enterprise ingress controller: Patch certificate secret]({{site.baseurl}}/docs/runtime/requirements/#patch-certificate-secret) -1. If you bypassed installing ingress resources with the `--skip-ingress` flag for ingress controllers not in the supported list, create and register Git integrations using these commands: - `cf integration git add default --runtime --api-url ` - `cf integration git register default --runtime --token ` - - -{::nomarkdown} -
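To make the last step concrete, here is a sketch of the Git integration commands for a hypothetical runtime named `codefresh-prod` using GitHub; the API URL and token shown are assumptions to replace with your own provider's values.

```shell
# Create the integration, then register it with a personal access token (placeholder shown).
cf integration git add default --runtime codefresh-prod --api-url https://api.github.com
cf integration git register default --runtime codefresh-prod --token ghp_examplePersonalAccessToken
```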
            -{:/} - -### Hybrid runtime components - -**Git repositories** -* Runtime install repository: The installation repo contains three folders: apps, bootstrap and projects, to manage the runtime itself with Argo CD. -* Git source repository: Created with the name `[repo_name]_git-source`. This repo stores manifests for pipelines with sources, events, workflow templates. See [Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/). - -* Shared configuration repository: Stores configuration and resource manifests that can be shared across runtimes, such as integration resources. See [Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration/) - -**Argo CD components** -* Project, comprising an Argo CD AppProject and an ApplicationSet -* Installations of the following applications in the project: - * Argo CD - * Argo Workflows - * Argo Events - * Argo Rollouts - -**Codefresh-specific components** -* Codefresh Applications in the Argo CD AppProject: - * App-proxy facilitating behind-firewall access to Git - * Git Source entity that references the`[repo_name]_git-source` - -Once the hybrid runtime is successfully installed, it is provisioned on the Kubernetes cluster, and displayed in the **Runtimes** page. - -{::nomarkdown} -
            -{:/} - - -### (Optional) Internal ingress host configuration for existing hybrid runtimes -If you already have provisioned hybrid runtimes, to use an internal ingress host for app-proxy communication and an external ingress host to handle webhooks, change the specs for the `Ingress` and `Runtime` resources in the runtime installation repository. Use the examples as guidelines. - -`/apps/app-proxy/overlays//ingress.yaml`: change `host` - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: codefresh-cap-app-proxy - namespace: codefresh #replace with your runtime name -spec: - ingressClassName: nginx - rules: - - host: my-internal-ingress-host # replace with the internal ingress host for app-proxy - http: - paths: - - backend: - service: - name: cap-app-proxy - port: - number: 3017 - path: /app-proxy/ - pathType: Prefix -``` - -`..//bootstrap/.yaml`: add `internalIngressHost` - -```yaml -apiVersion: v1 -data: - base-url: https://g.codefresh.io - runtime: | - apiVersion: codefresh.io/v1alpha1 - kind: Runtime - metadata: - creationTimestamp: null - name: codefresh #replace with your runtime name - namespace: codefresh #replace with your runtime name - spec: - bootstrapSpecifier: github.com/codefresh-io/cli-v2/manifests/argo-cd - cluster: https://7DD8390300DCEFDAF87DC5C587EC388C.gr7.us-east-1.eks.amazonaws.com - components: - - isInternal: false - name: events - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-events - wait: true - - isInternal: false - name: rollouts - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-rollouts - wait: false - - isInternal: false - name: workflows - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-workflows - wait: false - - isInternal: false - name: app-proxy - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/app-proxy - wait: false - defVersion: 1.0.1 - ingressClassName: nginx - ingressController: k8s.io/ingress-nginx - ingressHost: https://support.cf.com/ - internalIngressHost: https://my-internal-ingress-host # add this line and replace my-internal-ingress-host with your internal ingress host - repo: https://github.com/NimRegev/my-codefresh.git - version: 99.99.99 -``` - - -### Related articles -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) -[Troubleshoot hybrid runtime installation]({{site.baseurl}}/docs/troubleshooting/runtime-issues/) diff --git a/_docs/runtime/installation_original.md b/_docs/runtime/installation_original.md deleted file mode 100644 index a9624bc7d..000000000 --- a/_docs/runtime/installation_original.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: "Install hybrid runtimes" -description: "" -group: runtime -toc: true ---- - -If you have a hybrid environment, you can provision one or more hybrid runtimes in your Codefresh account. The hybrid runtime comprises Argo CD components and Codefresh-specific components. The Argo CD components are derived from a fork of the Argo ecosystem, and do not correspond to the open-source versions available. - -> If you have Hosted GitOps, to provision a hosted runtime, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). 
- -There are two parts to installing a hybrid runtime: - -1. Installing the Codefresh CLI -2. Installing the hybrid runtime from the CLI, either through the CLI wizard or via silent installation. - The hybrid runtime is installed in a specific namespace on your cluster. You can install more runtimes on different clusters in your deployment. - Every hybrid runtime installation makes commits to two Git repos: - - * Runtime install repo: The installation repo that manages the hybrid runtime itself with Argo CD. If the repo URL does not exist, it is created automatically during runtime installation. - * Git Source repo: Created automatically during runtime installation. The repo where you store manifests to run Codefresh pipelines. - -See also [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture). - -### Installing the Codefresh CLI - -Install the Codefresh CLI using the option that best suits you: `curl`, `brew`, or standard download. -If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation. - -### Installing the hybrid runtime - -1. Do one of the following: - * If this is your first hybrid runtime installation, in the Welcome page, select **+ Install Runtime**. - * If you have provisioned a hybrid runtime, to provision additional runtimes, in the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtimes**. -1. Run: - * CLI wizard: Run `cf runtime install`, and follow the prompts to enter the required values. - * Silent install: Pass the required flags in the install command: - `cf runtime install --repo --git-token --silent` - For the list of flags, see _Hybrid runtime flags_. - -> Note: -> Hybrid runtime installation starts by checking network connectivity and the K8s cluster server version. - To skip these tests, pass the `--skip-cluster-checks` flag. - -#### Hybrid runtime flags - -**Runtime name** -Required. -The runtime name must start with a lower-case character, and can include up to 62 lower-case characters and numbers. -* CLI wizard: Add when prompted. -* Silent install: Required. - -**Namespace resource labels** -Optional. -The label of the namespace resource to which you are installing the hybrid runtime. You can add more than one label. Labels are required to identify the networks that need access during installation, as is the case when using service meshes such as Istio, for example. - -* CLI wizard and Silent install: Add the `--namespace-labels` flag, and define the labels in `key=value` format. Separate multiple labels with `commas`. - -**Kube context** -Required. -The cluster defined as the default for `kubectl`. If you have more than one Kube context, the current context is selected by default. - -* CLI wizard: Select the Kube context from the list displayed. -* Silent install: Explicitly specify the Kube context with the `--context` flag. - -**Ingress class** -Required. -If you have more than one ingress class configured on your cluster: - -* CLI wizard: Select the ingress class for runtime installation from the list displayed. -* Silent install: Explicitly specify the ingress class through the `--ingress-class` flag. Otherwise, runtime installation fails. - -**Ingress host** -Required. -The IP address or host name of the ingress controller component. 
- -* CLI wizard: Automatically selects and displays the host, either from the cluster or the ingress controller associated with the **Ingress class**. -* Silent install: Add the `--ingress-host` flag. If a value is not provided, takes the host from the ingress controller associated with the **Ingress class**. - > Important: For AWS ALB, the ingress host is created post-installation. However, when prompted, add the domain name you will create in `Route 53` as the ingress host. - -SSL certificates for the ingress host: -If the ingress host does not have a valid SSL certificate, you can continue with the installation in insecure mode, which disables certificate validation. - -* CLI wizard: Automatically detects and prompts you to confirm continuing with the installation in insecure mode. -* Silent install: To continue with the installation in insecure mode, add the `--insecure-ingress-host` flag. - -**Internal ingress host** -Optional. -Enforce separation between internal (app-proxy) and external (webhook) communication by adding an internal ingress host for the app-proxy service in the internal network. -For both CLI wizard and Silent install: - -* For new runtime installations, add the `--internal-ingress-host` flag pointing to the ingress host for `app-proxy`. -* For existing installations, commit changes to the installation repository by modifying the `app-proxy ingress` and `.yaml` - See _Internal ingress host configuration (optional for existing runtimes only)_ in [Post-installation configuration](#post-installation-configuration). - -**Ingress resources** -Optional. -If you have a different routing service (not NGINX), bypass installing ingress resources with the `--skip-ingress` flag. -In this case, after completing the installation, manually configure the cluster's routing service, and create and register Git integrations. See _Cluster routing service_ in [Post-installation configuration](#post-installation-configuration). - -**Shared configuration repository** -The Git repository per runtime account with shared configuration manifests. -* CLI wizard and Silent install: Add the `--shared-config-repo` flag and define the path to the shared repo. - -**Insecure flag** -For _on-premises installations_, if the Ingress controller does not have a valid SSL certificate, to continue with the installation, add the `--insecure` flag to the installation command. - -**Repository URLs** -The GitHub repository to house the installation definitions. - -* CLI wizard: If the repo doesn't exist, Codefresh creates it during runtime installation. -* Silent install: Required. Add the `--repo` flag. - -**Git runtime token** -Required. -The Git token authenticating access to the GitHub installation repository. -* Silent install: Add the `--git-token` flag. - -**Codefresh demo resources** -Optional. -Install demo pipelines to use as a starting point to create your own pipelines. We recommend installing the demo resources as these are used in our quick start tutorials. - -* Silent install: Add the `--demo-resources` flag. By default, set to `true`. - -### Hybrid runtime components - -**Git repositories** - -* Runtime install repo: The installation repo contains three folders: apps, bootstrap and projects, to manage the runtime itself with Argo CD. -* Git source repository: Created with the name `[repo_name]_git-source`. This repo stores manifests for pipelines with sources, events, workflow templates. 
- -**Argo CD components** - -* Project, comprising an Argo CD AppProject and an ApplicationSet -* Installations of the following applications in the project: - * Argo CD - * Argo Workflows - * Argo Events - * Argo Rollouts - -**Codefresh-specific components** - -* Codefresh Applications in the Argo CD AppProject: - * App-proxy facilitating behind-firewall access to Git - * Git Source entity that references the`[repo_name]_git-source` - -Once the hybrid runtime is successfully installed, it is provisioned on the Kubernetes cluster, and displayed in the **Runtimes** page. - -### Hybrid runtime post-installation configuration - -After provisioning a hybrid runtime, configure additional settings for the following: - -* NGINX Enterprise installations (with and without NGINX Ingress Operator) -* AWS ALB installations -* Cluster routing service if you bypassed installing ingress resources -* (Existing hybrid runtimes) Internal and external ingress host specifications -* Register Git integrations - - - -#### AWS ALB post-install configuration - -For AWS ALB installations, do the following: - -* Create an `Alias` record in Amazon Route 53 -* Manually register Git integrations - see _Git integration registration_. - -Create an `Alias` record in Amazon Route 53, and map your zone apex (example.com) DNS name to your Amazon CloudFront distribution. -For more information, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html){:target="\_blank"}. - -{% include image.html - lightbox="true" - file="/images/runtime/post-install-alb-ingress.png" - url="/images/runtime/post-install-alb-ingress.png" - alt="Route 53 record settings for AWS ALB" - caption="Route 53 record settings for AWS ALB" - max-width="30%" -%} - -#### Configure cluster routing service - -If you bypassed installing ingress resources with the `--skip-ingress` flag, configure the `host` for the Ingress, or the VirtualService for Istio if used, to route traffic to the `app-proxy` and `webhook` services, as in the examples below. - -**Ingress resource example for `app-proxy`:** - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: codefresh-cap-app-proxy - namespace: codefresh -spec: - ingressClassName: alb - rules: - - host: my.support.cf-cd.com # replace with your host name - http: - paths: - - backend: - service: - name: cap-app-proxy - port: - number: 3017 - path: /app-proxy/ - pathType: Prefix -``` - -**`VirtualService` examples for `app-proxy` and `webhook`:** - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: cap-app-proxy -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway - http: - - match: - - uri: - prefix: /app-proxy - route: - - destination: - host: cap-app-proxy - port: - number: 3017 -``` - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: csdp-default-git-source -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway - http: - - match: - - uri: - prefix: /webhooks/test-runtime3/push-github # replace `test-runtime3` with your runtime name - route: - - destination: - host: push-github-eventsource-svc - port: - number: 80 -``` -Continue with [Git integration registration](#git-integration-registration) in this article. 
- -#### Internal ingress host configuration (optional for existing hybrid runtimes only) - -If you already have provisioned hybrid runtimes, to use an internal ingress host for app-proxy communication and an external ingress host to handle webhooks, change the specs for the `Ingress` and `Runtime` resources in the runtime installation repository. Use the examples as guidelines. - -`/apps/app-proxy/overlays//ingress.yaml`: change `host` - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: codefresh-cap-app-proxy - namespace: codefresh #replace with your runtime name -spec: - ingressClassName: nginx - rules: - - host: my-internal-ingress-host # replace with the internal ingress host for app-proxy - http: - paths: - - backend: - service: - name: cap-app-proxy - port: - number: 3017 - path: /app-proxy/ - pathType: Prefix -``` - -`..//bootstrap/.yaml`: add `internalIngressHost` - -```yaml -apiVersion: v1 -data: - base-url: https://g.codefresh.io - runtime: | - apiVersion: codefresh.io/v1alpha1 - kind: Runtime - metadata: - creationTimestamp: null - name: codefresh #replace with your runtime name - namespace: codefresh #replace with your runtime name - spec: - bootstrapSpecifier: github.com/codefresh-io/cli-v2/manifests/argo-cd - cluster: https://7DD8390300DCEFDAF87DC5C587EC388C.gr7.us-east-1.eks.amazonaws.com - components: - - isInternal: false - name: events - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-events - wait: true - - isInternal: false - name: rollouts - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-rollouts - wait: false - - isInternal: false - name: workflows - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-workflows - wait: false - - isInternal: false - name: app-proxy - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/app-proxy - wait: false - defVersion: 1.0.1 - ingressClassName: nginx - ingressController: k8s.io/ingress-nginx - ingressHost: https://support.cf.com/ - internalIngressHost: https://my-internal-ingress-host # add this line and replace my-internal-ingress-host with your internal ingress host - repo: https://github.com/NimRegev/my-codefresh.git - version: 99.99.99 -``` - -#### Git integration registration - -If you bypassed installing ingress resources with the `--skip-ingress` flag, or if AWS ALB is your ingress controller, create and register Git integrations using these commands: - `cf integration git add default --runtime --api-url ` - - `cf integration git register default --runtime --token ` - -### Related articles -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) -[Troubleshoot runtime installation]({{site.baseurl}}/docs/troubleshooting/runtime-issues/) diff --git a/_docs/runtime/monitor-manage-runtimes.md b/_docs/runtime/monitor-manage-runtimes.md deleted file mode 100644 index 189b2b081..000000000 --- a/_docs/runtime/monitor-manage-runtimes.md +++ /dev/null @@ -1,332 +0,0 @@ ---- -title: "Manage provisioned runtimes" -description: "" -group: runtime -redirect_from: - - /monitor-manage-runtimes/ - - /monitor-manage-runtimes -toc: true ---- - - -The **Runtimes** page displays the provisioned runtimes in your account, both hybrid, and the hosted runtime if you have 
one. - -View runtime components and information in List or Topology view formats, and upgrade, uninstall, and migrate runtimes. - -{% include - image.html - lightbox="true" - file="/images/runtime/runtime-list-view.png" - url="/images/runtime/runtime-list-view.png" - alt="Runtime List View" - caption="Runtime List View" - max-width="70%" -%} - -Select the view mode to view runtime components and information, and manage provisioned runtimes in the view mode that suits you. - - -Manage provisioned runtimes: -* [Add managed clusters to hybrid or hosted runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) -* [Add and manage Git Sources associated with hybrid or hosted runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -* [Upgrade provisioned hybrid runtimes](#hybrid-upgrade-provisioned-runtimes) -* [Uninstall provisioned runtimes](#uninstall-provisioned-runtimes) -* [Migrate ingress-less hybrid runtimes](#hybrid-migrate-ingress-less-runtimes) - -> Unless specified otherwise, management options are common to both hybrid and hosted runtimes. If an option is valid only for hybrid runtimes, it is indicated as such. - -* Add managed clusters to hybrid or hosted runtimes (see [Adding & managing external clusters]({{site.baseurl}}/docs/runtime/managed-cluster/)) -* Add and manage Git Sources associated with hybrid or hosted runtimes (see [Adding & managing Git Sources]({{site.baseurl}}/docs/runtime/git-sources/)) -* Upgrade provisioned hybrid runtimes -* Uninstall hybrid or hosted runtimes -* Update Git runtime tokens - -To monitor provisioned hybrid runtimes, including recovering runtimes for failed clusters, see [Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/). - -### Runtime views - -View provisioned hybrid and hosted runtimes in List or Topology view formats. - -* List view: The default view, displays the list of provisioned runtimes, the clusters managed by them, and Git Sources. -* Topology view: Displays a hierarchical view of runtimes and the clusters managed by them, with health and sync status of each cluster. - -#### List view - -The List view is a grid-view of the provisioned runtimes. - -Here is an example of the List view for runtimes. -{% include - image.html - lightbox="true" - file="/images/runtime/runtime-list-view.png" - url="/images/runtime/runtime-list-view.png" - alt="Runtime List View" - caption="Runtime List View" - max-width="70%" -%} - -Here is a description of the information in the List View. - -{: .table .table-bordered .table-hover} -| List View Item| Description | -| -------------- | ---------------- | -|**Name**| The name of the provisioned Codefresh runtime. | -|**Type**| The type of runtime provisioned, and can be **Hybrid** or **Hosted**. | -|**Cluster/Namespace**| The K8s API server endpoint, as well as the namespace within the cluster. | -|**Modules**| The modules installed based on the type of provisioned runtime. Hybrid runtimes include CI and CD Ops modules. Hosted runtimes include CD Ops. | -|**Managed Cluster**| The number of managed clusters, if any, for the runtime. To view the list of managed clusters, select the runtime, and then the **Managed Clusters** tab. To work with managed clusters, see [Adding external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster).| -|**Version**| The version of the runtime currently installed. **Update Available!** indicates there are later versions of the runtime. 
To see all the commits to the runtime, mouse over **Update Available!**, and select **View Complete Change Log**. -|**Last Updated**| The most recent update information from the runtime to the Codefresh platform. Updates are sent to the platform typically every few minutes. Longer update intervals may indicate networking issues.| -|**Sync Status**| The health and sync status of the runtime or cluster. {::nomarkdown}
            • indicates health or sync errors in the runtime, or a managed cluster if one was added to the runtime.
              The runtime name is colored red.
            • indicates that the runtime is being synced to the cluster on which it is provisioned.
            {:/} | - -#### Topology view - -A hierarchical visualization of the provisioned runtimes. The Topology view makes it easy to identify key information such as versions, health and sync status, for both the provisioned runtime and the clusters managed by it. -Here is an example of the Topology view for runtimes. - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-topology-view.png" - url="/images/runtime/runtime-topology-view.png" - alt="Runtime Topology View" - caption="Runtime Topology View" - max-width="30%" -%} - -Here is a description of the information in the Topology view. - -{: .table .table-bordered .table-hover} -| Topology View Item | Description | -| ------------------------| ---------------- | -|**Runtime** | ![](../../../images/icons/codefresh-runtime.png?display=inline-block) the provisioned runtime. Hybrid runtimes display the name of the K8s API server endpoint with the cluster. Hosted runtimes display 'hosted'. | -|**Cluster** | The local cluster, and managed clusters if any, for the runtime. {::nomarkdown}
            • indicates the local cluster, always displayed as `in-cluster`. The in-cluster server URL is always set to `https://kubernetes.default.svc/`.
            • indicates a managed cluster.
            • select to add a new managed cluster.
            {:/} To view cluster components, select the cluster. To add and work with managed clusters, see [Adding external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster). | -|**Health/Sync status** |The health and sync status of the runtime or cluster. {::nomarkdown}
            • indicates health or sync errors in the runtime, or a managed cluster if one was added to the runtime.
              The runtime or cluster node is bordered in red and the name is colored red.
            • indicates that the runtime is being synced to the cluster on which it is provisioned.
            {:/} | -|**Search and View options** | {::nomarkdown}
            • Find a runtime or its clusters by typing part of the runtime/cluster name, and then navigate to the entries found.
            • Topology view options: Resize to window, zoom in, zoom out, full screen view.
            {:/}| - - - -### (Hybrid) Upgrade provisioned runtimes - -Upgrade provisioned hybrid runtimes to install critical security updates or to install the latest version of all components. Upgrade a provisioned hybrid runtime by running a silent upgrade or through the CLI wizard. -If you have managed clusters for the hybrid runtime, upgrading the runtime automatically updates runtime components within the managed cluster as well. - -> When there are security updates, the UI displays the alert, _At least one runtime requires a security update_. The Version column displays an _Update Required!_ notification. - -> If you have older runtime versions, upgrade to manually define or create the shared configuration repo for your account. See [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/). - - -**Before you begin** -For both silent or CLI-wizard based upgrades, make sure you have: - -* The latest version of the Codefresh CLI - Run `cf version` to see your version and [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} to compare with the latest CLI version. -* A valid runtime Git token - -**Silent upgrade** - -* Pass the mandatory flags in the upgrade command: - - `cf runtime upgrade --git-token --silent` - where: - `` is a valid runtime token with the `repo` and `admin-repo.hook` scopes. - -**CLI wizard-based upgrade** - -1. In the Codefresh UI, make sure you are in [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. Switch to either the **List View** or to the **Topology View**. -1. **List view**: - * Select the runtime name. - * To see all the commits to the runtime, in the Version column, mouse over **Update Available!**, and select **View Complete Change Log**. - * On the top-right, select **Upgrade**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-list-view-upgrade.png" - url="/images/runtime/runtime-list-view-upgrade.png" - alt="List View: Upgrade runtime option" - caption="List View: Upgrade runtime option" - max-width="30%" - %} - - **Topology view**: - Select the runtime cluster, and from the panel, select the three dots and then select **Upgrade Runtime**. - {% include - image.html - lightbox="true" - file="/images/runtime/runtiime-topology-upgrade.png" - url="/images/runtime/runtiime-topology-upgrade.png" - alt="Topology View: Upgrade runtime option" - caption="Topology View: Upgrade runtime option" - max-width="30%" -%} - -{:start="4"} - -1. If you have already installed the Codefresh CLI, in the Install Upgrades panel, copy the upgrade command. - - {% include - image.html - lightbox="true" - file="/images/runtime/install-upgrades.png" - url="/images/runtime/install-upgrades.png" - alt="Upgrade runtime" - caption="Upgrade runtime panel" - max-width="30%" -%} - -{:start="5"} -1. In your terminal, paste the command, and do the following: - * Update the Git token value. - * To manually define the shared configuration repo, add the `--shared-config-repo` flag with the path to the repo. -1. Confirm to start the upgrade. - - - - -### Uninstall provisioned runtimes - -Uninstall provisioned hybrid and hosted runtimes that are not in use. Uninstall a runtime by running a silent uninstall, or through the CLI wizard. -> Uninstalling a runtime removes the Git Sources and managed clusters associated with the runtime. 
- -**Before you begin** -For both types of uninstalls, make sure you have: - -* The latest version of the Codefresh CLI -* A valid runtime Git token -* The Kube context from which to uninstall the provisioned runtime - -**Silent uninstall** -Pass the mandatory flags in the uninstall command: - `cf runtime uninstall --git-token --silent` - where: - `--git-token` is a valid runtime token with the `repo` and `admin-repo.hook` scopes. - -**CLI wizard uninstall** - -1. In the Codefresh UI, make sure you are in [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. Switch to either the **List View** or to the **Topology View**. -1. **List view**: On the top-right, select the three dots and then select **Uninstall**. - - {% include - image.html - lightbox="true" - file="/images/runtime/uninstall-location.png" - url="/images/runtime/uninstall-location.png" - alt="List View: Uninstall runtime option" - caption="List View: Uninstall runtime option" - max-width="30%" -%} - -**Topology view**: Select the runtime node, and from the panel, select the three dots and then select **Uninstall Runtime**. - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-topology-uninstall.png" - url="/images/runtime/runtime-topology-uninstall.png" - alt="Topology View: Uninstall runtime option" - caption="Topology View: Uninstall runtime option" - max-width="30%" -%} - -{:start="4"} - -1. If you already have the latest version of the Codefresh CLI, in the Uninstall Codefresh Runtime panel, copy the uninstall command. - - {% include - image.html - lightbox="true" - file="/images/runtime/uninstall.png" - url="/images/runtime/uninstall.png" - alt="Uninstall Codefresh runtime" - caption="Uninstall Codefresh runtime" - max-width="40%" -%} - -{:start="5"} - -1. In your terminal, paste the command, and update the Git token value. -1. Select the Kube context from which to uninstall the runtime, and then confirm the uninstall. -1. If you get errors, run the uninstall command again, with the `--force` flag. - - - -### Update Git runtime tokens - -Provisioned runtimes require valid Git tokens to authenticate the runtimes. - -There are two situations in which you need to update Git runtime tokens: -* Update invalid, revoked, or expired tokens: Codefresh automatically flags runtimes with such tokens. It is mandatory to update the Git tokens for these runtimes to continue working with the platform. -* Update valid tokens: Optional. You may want to update Git runtime tokens, even valid runtime tokens, by deleting the existing token and replacing it with a new runtime token. - -The method for updating a Git runtime token is the same regardless of the reason for the update: -* OAuth2 authorization, if your admin has registered an OAuth Application for Codefresh -* Git access token authentication, by generating a personal access token in your Git provider account with the correct permissions - -**Before you begin** -* To authenticate through a Git access token, generate an access token for the runtime with the correct scopes: `repo` and `admin-repo` - -**How to** -1. Do one of the following: - * If you see a notification in the Codefresh UI about invalid runtime tokens, click **[Update Token]**. - In the Runtimes page, runtimes with invalid tokens are prefixed by a key icon. Mousing over the icon shows that the token is invalid. - * To update an existing runtime token, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. 
Select the runtime for which to update the Git token. -1. From the context menu with the additional actions at the top-right, select **Update Git Runtime token**. - - {% include - image.html - lightbox="true" - file="/images/runtime/update-git-runtime-token.png" - url="/images/runtime/update-git-runtime-token.png" - alt="Update Git runtime token option" - caption="Update Git runtime token option" - max-width="40%" -%} - -{:start="4"} -1. Do one of the following: - * If your admin has set up OAuth access, click **Authorize Access to Git Provider**. Go to _step 5_. - * Alternatively, authenticate with an access token from your Git provider. Go to _step 6_. - -{:start="5"} -1. For OAuth2 authorization: - > If the application is not registered, you get an error. Contact your admin for help. - * Enter your credentials, and select **Sign In**. - * If required, for example when two-factor authentication is configured, complete the verification. - - {% include - image.html - lightbox="true" - file="/images/administration/user-settings/oauth-user-authentication.png" - url="/images/administration/user-settings/oauth-user-authentication.png" - alt="Authorizing access with OAuth2" - caption="Authorizing access with OAuth2" - max-width="30%" - %} - -{:start="6"} -1. For Git token authentication, expand **Advanced authorization options**, and then paste the generated token in the **Git runtime token** field. - -1. Click **Update Token**. - - -### Related articles -[Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) - diff --git a/_docs/runtime/monitoring-troubleshooting.md b/_docs/runtime/monitoring-troubleshooting.md deleted file mode 100644 index c225c1b45..000000000 --- a/_docs/runtime/monitoring-troubleshooting.md +++ /dev/null @@ -1,282 +0,0 @@ ---- -title: "(Hybrid) Monitor provisioned runtimes" -description: "" -group: runtime -toc: true ---- - -Monitor provisioned runtimes for security, health, and sync errors: - -* (Hybrid and hosted) View/download logs for runtimes and for runtime components -* (Hybrid) Restore provisioned runtimes -* (Hybrid) Configure browsers to allow access to insecure runtimes -* (Hybrid) Monitor notifications in the Activity Log - - -### View/download logs to troubleshoot runtimes -Logs are available for completed runtimes, both for the runtime and for individual runtime components. Download runtime log files for offline viewing and analysis, or view online logs for a runtime component, and download if needed for offline analysis. Online logs support free-text search, search-result navigation, and line-wrap for enhanced readability. - -Log files include events from the date of the application launch, with the newest events listed first. - -{::nomarkdown} -

            -{:/} - -#### Download logs for runtimes -Download the log file for a runtime. The runtime log is downloaded as a `.tar.gz` file, which contains the individual log files for each runtime component. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime for which to download logs. -1. From the list of **Additional Actions**, select **Download All Logs**. - The log file is downloaded to the Downloads folder or the folder designated for downloads, with the filename, `.tar.gz`. For example, `codefreshv2-production2.tar.gz`. - - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-download-all.png" - url="/images/runtime/runtime-logs-download-all.png" - alt="Download logs for selected runtime" - caption="Download logs for selected runtime" - max-width="40%" -%} - - -{:start="4"} -1. To view the log files of the individual components, unzip the file. - Here is an example of the folder with the individual logs. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-folder-view.png" - url="/images/runtime/runtime-logs-folder-view.png" - alt="Individual log files in folder" - caption="Individual log files in folder" - max-width="50%" -%} - -{:start="5"} -1. Open a log file with the text editor of your choice. - -{::nomarkdown} -

            -{:/} - -#### View/download logs for runtime components -View online logs for any runtime component, and if needed, download the log file for offline viewing and analysis. - -Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events from the application launch to the date and time of download. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime. -1. Select the runtime component and then select **View Logs**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-view-component.png" - url="/images/runtime/runtime-logs-view-component.png" - alt="View log option for individual runtime component" - caption="View log option for individual runtime component" - max-width="40%" -%} - - -{:start="4"} -1. Do the following: - * Search by free-text for any string, and click the next and previous buttons to navigate between the search results. - * To switch on line-wrap for readability, click **Wrap**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-screen-view.png" - url="/images/runtime/runtime-logs-screen-view.png" - alt="Runtime component log example" - caption="Runtime component log example" - max-width="50%" -%} - -{:start="5"} -1. To download the log, click **Download**. - The file is downloaded as `.log`. - -### (Hybrid) Restore provisioned runtimes - -In case of cluster failure, restore the provisioned hybrid runtime from the existing runtime installation repository. For partial or complete cluster failures, you can restore the runtime to either the failed cluster or to a different cluster. Restoring the provisioned runtime reinstalls the runtime leveraging the resources in the existing runtime repo. - -Restoring the runtime: -* Applies `argo-cd` from the installation manifests in your repo to your cluster -* Associates `argo-cd` with the existing installation repo -* Applies the runtime and `argo-cd` secrets to the cluster -* Updates the runtime config map (`.yaml` in the `bootstrap` directory) with the new cluster configuration for these fields: - `cluster` - `ingressClassName` - `ingressController` - `ingressHost` - -{::nomarkdown} -
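To make the config map update more concrete, here is a purely hypothetical sketch of how the fields listed above might appear in the runtime config file in the `bootstrap` directory. The field names are taken from the list above; the file layout and all values are assumptions and will differ in your own installation repo.

```yaml
# Hypothetical fragment of the runtime config file in the bootstrap directory.
# Field names come from the list above; every value is a placeholder.
cluster: https://my-new-cluster.example.com       # API server of the cluster the runtime was restored to
ingressClassName: nginx                           # ingress class used by the restored runtime
ingressController: k8s.io/ingress-nginx           # ingress controller type
ingressHost: https://my-runtime.example.com       # external host through which the runtime is reached
```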

            -{:/} - -#### How to restore a hybrid runtime -Reinstall the hybrid runtime from the existing installation repository to restore it to the same or a different cluster. - -**Before you begin** - -* Have the following information handy: - > All values must be identical to the runtime to be restored. - * Runtime name - * Repository URL - * Codefresh context - * Kube context: Required if you are restoring to the same cluster - -**How to** - -1. Run: - `cf runtime install --from-repo` -1. Provide the relevant values when prompted. -1. If you are performing the runtime recovery in a different cluster, verify the ingress resource configuration for `app-proxy`, `workflows`, and `default-git-source`. - If the health status remains as `Progressing`, do the following: - - * In the runtime installation repo, check if the `ingress.yaml` files for the `app-proxy` and `workflows` are configured with the correct `host` and `ingressClassName`: - - `apps/app-proxy/overlays/<runtime-name>/ingress.yaml` - `apps/workflows/overlays/<runtime-name>/ingress.yaml` - - * In the Git Source repository, check the `host` and `ingressClassName` in `cdp-default-git-source.ingress.yaml`: - - `resources_<runtime-name>/cdp-default-git-source.ingress.yaml` - - See the [example](#ingress-example) below. - -{:start="4"} -1. If you have managed clusters registered to the hybrid runtime you are restoring, reconnect them. - Run the command and follow the instructions in the wizard: - `cf cluster add` - -1. Verify that you have a registered Git integration: - `cf integration git list --runtime <runtime-name>` - -1. If needed, create a new Git integration: - `cf integration git add default --runtime <runtime-name> --provider github --api-url https://api.github.com` - -{::nomarkdown} -

            -{:/} - -#### Ingress example -This is an example of the `ingress.yaml` for `workflows`. - - ```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - ingress.kubernetes.io/protocol: https - ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/backend-protocol: https - nginx.ingress.kubernetes.io/rewrite-target: /$2 - creationTimestamp: null - name: runtime-name-workflows-ingress - namespace: runtime-name -spec: - ingressClassName: nginx - rules: - - host: your-ingress-host.com - http: - paths: - - backend: - service: - name: argo-server - port: - number: 2746 - path: /workflows(/|$)(.*) - pathType: ImplementationSpecific -status: - loadBalancer: {} -``` - - -### (Hybrid) Configure browser to allow insecure hybrid runtimes - -If at least one of your hybrid runtimes was installed in insecure mode (without an SSL certificate for the ingress controller from a CA), the UI alerts you that _At least one runtime was installed in insecure mode_. -{% include - image.html - lightbox="true" - file="/images/runtime/runtime-insecure-alert.png" - url="/images/runtime/runtime-insecure-alert.png" - alt="Insecure runtime installation alert" - caption="Insecure runtime installation alert" - max-width="100%" -%} - -All you need to do is to configure the browser to trust the URL and receive content. - -1. Select **View Runtimes** to the right of the alert. - You are taken to the Runtimes page, where you can see insecure runtimes tagged as **Allow Insecure**. - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-insecure-steps.png" - url="/images/runtime/runtime-insecure-steps.png" - alt="Insecure runtimes in Runtime page" - caption="Insecure runtimes in Runtime page" - max-width="40%" -%} -{:start="2"} -1. For _every_ insecure runtime, select **Allow Insecure**, and when the browser prompts you to allow access, do as relevant: - -* Chrome: Click **Advanced** and then **Proceed to site**. -* Firefox: Click **Advanced** and then **Accept the risk and continue**. -* Safari: Click **Show Certificate**, and then select **Always allow content from site**. -* Edge: Click **Advanced**, and then select **Continue to site(unsafe)**. - -### (Hybrid) View notifications for hybrid runtimes in Activity Log - -The Activity Log is a quick way to monitor notifications for runtime events such as upgrades. A pull-down panel in the Codefresh toolbar, the Activity Log shows ongoing, success, and error notifications, sorted by date, starting with today's date. - -1. In the Codefresh UI, on the top-right of the toolbar, select ![](/images/pipeline/monitoring/pipeline-activity-log-toolbar.png?display=inline-block) **Activity Log**. -1. To see notifications for provisioned runtimes, filter by **Runtime**. - - {% include image.html - lightbox="true" - file="/images/runtime/runtime-activity-log.png" - url="/images/runtime/runtime-activity-log.png" - alt="Activity Log filtered by Runtime events" - caption="Activity Log filtered by Runtime events" - max-width="30%" - %} - -{:start="3"} - -1. To see more information on an error, select the **+** sign. - -### (Hybrid) Troubleshoot health and sync errors for runtimes - -The ![](/images/icons/error.png?display=inline-block) icon with the runtime in red indicates either health or sync errors. - -**Health errors** -Health errors are generated by Argo CD and by Codefresh for runtime components. - -**Sync errors** -Runtimes with sync errors display an **Out of sync** status in Sync Status column. 
They are related to discrepancies between the desired and actual state of a runtime component or one of the Git sources associated with the runtime. - -**View errors** -For both views, select the runtime, and then select **Errors Detected**. -Here is an example of health errors for a runtime. - - {% include image.html - lightbox="true" - file="/images/runtime/runtime-health-sync-errors.png" - url="/images/runtime/runtime-health-sync-errors.png" - alt="Health errors for runtime example" - caption="Health errors for runtime example" - max-width="30%" - %} - -### Related articles -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) \ No newline at end of file diff --git a/_docs/runtime/requirements.md b/_docs/runtime/requirements.md deleted file mode 100644 index f8499f4d9..000000000 --- a/_docs/runtime/requirements.md +++ /dev/null @@ -1,742 +0,0 @@ ---- -title: "Hybrid runtime requirements" -description: "" -group: runtime -toc: true ---- - - -The requirements listed are the **_minimum_** requirements to provision **_hybrid runtimes_** in the Codefresh platform. - -> Hosted runtimes are managed by Codefresh. To provision a hosted runtime as part of Hosted GitOps setup, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - ->In the documentation, Kubernetes and K8s are used interchangeably. - -{::nomarkdown} -
            -{:/} - -### Minimum requirements - -{: .table .table-bordered .table-hover} -| Item | Requirement | -| -------------- | -------------- | -|Kubernetes cluster | Server version 1.18 and higher, without Argo Project components. {::nomarkdown}
            Tip: To check the server version, run:
            kubectl version --short.{:/}| -| Ingress controller| Configured on Kubernetes cluster and exposed from the cluster. {::nomarkdown}
            Supported and tested ingress controllers include:
            • Ambassador
            • {:/}(see [Ambassador ingress configuration](#ambassador-ingress-configuration)){::nomarkdown}
            • AWS ALB (Application Load Balancer)
            • {:/} (see [AWS ALB ingress configuration](#aws-alb-ingress-configuration)){::nomarkdown}
            • Istio
            • {:/} (see [Istio ingress configuration](#istio-ingress-configuration)){::nomarkdown}
            • NGINX Enterprise (nginx.org/ingress-controller)
            • {:/} (see [NGINX Enterprise ingress configuration](#nginx-enterprise-ingress-configuration)){::nomarkdown}
            • NGINX Community (k8s.io/ingress-nginx)
            • {:/} (see [NGINX Community ingress configuration](#nginx-community-version-ingress-configuration)){::nomarkdown}
            • Traefik
            • {:/}(see [Traefik ingress configuration](#traefik-ingress-configuration))| -|Node requirements| {::nomarkdown}
              • Memory: 5000 MB
              • CPU: 2
              {:/}| -|Cluster permissions | Cluster admin permissions | -|Git providers |{::nomarkdown}
              • GitHub
              • GitHub Enterprise
              • GitLab Cloud
              • GitLab Server
              • Bitbucket Cloud
              • Bitbucket Server
              {:/}| -|Git access tokens | {::nomarkdown}Git runtime token:
              • Valid expiration date
              • Scopes:
                • GitHub and GitHub Enterprise: repo, admin-repo.hook
                • GitLab Cloud and GitLab Server: api, read_repository
                • Bitbucket Cloud and Server: Permissions: Read, Workspace membership: Read, Webhooks: Read and write, Repositories: Write, Admin
                {:/}| - - - -{::nomarkdown} -

                -{:/} - -### Ambassador ingress configuration -For detailed configuration information, see the [Ambassador ingress controller documentation](https://www.getambassador.io/docs/edge-stack/latest/topics/running/ingress-controller){:target="\_blank"}. - -This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. -* Valid external IP address -* Valid TLS certificate -* TCP support - -{::nomarkdown} -
                -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - {::nomarkdown} -
                -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
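If the TLS certificate is stored in the cluster, it typically lives in a standard `kubernetes.io/tls` secret that the ingress configuration references. The sketch below shows only that generic Kubernetes object; the secret name, namespace, and the way your ingress controller consumes it are assumptions that depend on your own setup.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: ingress-tls              # placeholder: the secret referenced by your ingress configuration
  namespace: ambassador          # placeholder: namespace of your ingress controller
type: kubernetes.io/tls
data:
  tls.crt: <base64-encoded certificate for the ingress FQDN>
  tls.key: <base64-encoded private key>
```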
                -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -

                -{:/} - -### AWS ALB ingress configuration - -For detailed configuration information, see the [ALB AWS ingress controller documentation](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4){:target="\_blank"}. - -This table lists the specific configuration requirements for Codefresh. - -{: .table .table-bordered .table-hover} -| What to configure | When to configure | -| -------------- | -------------- | -|Valid external IP address | _Before_ installing hybrid runtime | -|Valid TLS certificate | | -|TCP support| | -|Controller configuration | | -|Alias DNS record in route53 to load balancer | _After_ installing hybrid runtime | -|(Optional) Git integration registration | | - -{::nomarkdown} -
                -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                -{:/} - -#### Controller configuration -In the ingress resource file, verify that `spec.controller` is configured as `ingress.k8s.aws/alb`. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: alb -spec: - controller: ingress.k8s.aws/alb -``` - -{::nomarkdown} -
                -{:/} - -#### Create an alias to load balancer in route53 - -> The alias must be configured _after_ installing the hybrid runtime. - -1. Make sure a DNS record is available in the correct hosted zone. -1. _After_ hybrid runtime installation, in Amazon Route 53, create an alias to route traffic to the load balancer that is automatically created during the installation: - * **Record name**: Enter the same record name used in the installation. - * Toggle **Alias** to **ON**. - * From the **Route traffic to** list, select **Alias to Application and Classic Load Balancer**. - * From the list of Regions, select the region. For example, **US East**. - * From the list of load balancers, select the load balancer that was created during installation. - -For more information, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html){:target="\_blank"}. - -{% include image.html - lightbox="true" - file="/images/runtime/post-install-alb-ingress.png" - url="/images/runtime/post-install-alb-ingress.png" - alt="Route 53 record settings for AWS ALB" - caption="Route 53 record settings for AWS ALB" - max-width="60%" -%} - -{::nomarkdown} -
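The documented path is the Route 53 console procedure above. If you manage DNS as code, the same record could be expressed roughly as in the following CloudFormation-style sketch; the hosted zone, record name, load balancer DNS name, and hosted zone ID are all placeholders and must be taken from your own installation.

```yaml
# Illustrative only: an alias A record pointing the runtime host at the ALB created during installation.
Resources:
  RuntimeAliasRecord:
    Type: AWS::Route53::RecordSet
    Properties:
      HostedZoneName: example.com.                 # placeholder: your hosted zone
      Name: runtime.example.com.                   # placeholder: the record name used during installation
      Type: A
      AliasTarget:
        DNSName: my-alb-1234567890.us-east-1.elb.amazonaws.com   # placeholder: the load balancer created during installation
        HostedZoneId: <canonical-hosted-zone-id-of-the-load-balancer>
```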
                -{:/} - -#### (Optional) Git integration registration -If the installation failed, as can happen if the DNS record was not created within the timeframe, manually create and register Git integrations using these commands: - `cf integration git add default --runtime <runtime-name> --api-url <api-url>` - `cf integration git register default --runtime <runtime-name> --token <runtime-token>` - -{::nomarkdown} -

                -{:/} - -### Istio ingress configuration -For detailed configuration information, see [Istio ingress controller documentation](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress){:target="\_blank"}. - -The table below lists the specific configuration requirements for Codefresh. - -{: .table .table-bordered .table-hover} -| What to configure | When to configure | -| -------------- | -------------- | -|Valid external IP address |_Before_ installing hybrid runtime | -|Valid TLS certificate| | -|TCP support | | -|Cluster routing service | _After_ installing hybrid runtime | - -{::nomarkdown} -
                -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                -{:/} - -#### Cluster routing service -> The cluster routing service must be configured _after_ installing the hybrid runtime. - -Based on the runtime version, you need to configure a single or different `VirtualService` resources for these services: - -##### Runtime version 0.0.543 or higher -Configure a single `VirtualService` resource to route traffic to the `app-proxy`, `webhook`, and `workflow` services, as in the example below. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: pov-codefresh-istio-runtime # replace with your runtime name - name: internal-router -spec: - hosts: - - pov-codefresh-istio-runtime.sales-dev.codefresh.io # replace with your host name - gateways: - - istio-system/internal-router # replace with your gateway name - http: - - match: - - uri: - prefix: /webhooks - route: - - destination: - host: internal-router - port: - number: 80 - - match: - - uri: - prefix: /app-proxy - route: - - destination: - host: internal-router - port: - number: 80 - - match: - - uri: - prefix: /workflows - route: - - destination: - host: internal-router - port: - number: 80 -``` - -##### Runtime version 0.0.542 or lower - -Configure two different `VirtualService` resources, one to route traffic to the `app-proxy`, and the second to route traffic to the `webhook` services, as in the examples below. - -{::nomarkdown} -
                -{:/} - -**`VirtualService` example for `app-proxy`:** - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: cap-app-proxy -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway # replace with your host name - http: - - match: - - uri: - prefix: /app-proxy - route: - - destination: - host: cap-app-proxy - port: - number: 3017 -``` -{::nomarkdown} -
                -{:/} - -**`VirtualService` example for `webhook`:** - -> Configure a `webhook` for each event defined in the event source. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: csdp-default-git-source -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway # replace with your gateway name - http: - - match: - - uri: - prefix: /webhooks/test-runtime3/push-github # replace `test-runtime3` with your runtime name - route: - - destination: - host: push-github-eventsource-svc - port: - number: 80 -``` - - - -{::nomarkdown} -
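The `VirtualService` examples above assume that the referenced gateway (for example, `my-gateway`) already exists in your cluster. For reference, here is a minimal sketch of what such a `Gateway` resource could look like; the gateway name, namespace, host, and TLS secret are assumptions and must match your own Istio installation.

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  namespace: test-runtime3            # replace with your runtime name
  name: my-gateway                    # must match the gateway referenced by the VirtualService
spec:
  selector:
    istio: ingressgateway             # selects Istio's default ingress gateway pods
  servers:
  - port:
      number: 443
      name: https
      protocol: HTTPS
    tls:
      mode: SIMPLE
      credentialName: my-tls-secret   # assumption: TLS secret available to the ingress gateway
    hosts:
    - my.support.cf-cd.com            # replace with your host name
```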

                -{:/} - -### NGINX Enterprise ingress configuration - -For detailed configuration information, see [NGINX ingress controller documentation](https://docs.nginx.com/nginx-ingress-controller){:target="\_blank"}. - -The table below lists the specific configuration requirements for Codefresh. - -{: .table .table-bordered .table-hover} -| What to configure | When to configure | -| -------------- | -------------- | -|Verify valid external IP address |_Before_ installing hybrid runtime | -|Valid TLS certificate | | -|TCP support| | -|NGINX Ingress: Enable report status to cluster | | -|NGINX Ingress Operator: Enable report status to cluster| | -|Patch certificate secret |_After_ installing hybrid runtime | - -{::nomarkdown} -
                -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                -{:/} - -#### NGINX Ingress: Enable report status to cluster - -If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing” resulting in a timeout error during installation. - -* Pass `--report-ingress-status` to `deployment`. - -```yaml -spec: - containers: - - args: - - --report-ingress-status -``` - -{::nomarkdown} -
                -{:/} - -#### NGINX Ingress Operator: Enable report status to cluster - -If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing” resulting in a timeout error during installation. - -1. Add this to the `Nginxingresscontrollers` resource file: - - ```yaml - ... - spec: - reportIngressStatus: - enable: true - ... - ``` - -1. Make sure you have a certificate secret in the same namespace as the runtime. Copy an existing secret if you don't have one. -You will need to add this to the `ingress-master` when you have completed runtime installation. - -{::nomarkdown} -
                -{:/} - -#### Patch certificate secret -> The certificate secret must be configured _after_ installing the hybrid runtime. - -Patch the certificate secret in `spec.tls` of the `ingress-master` resource. -The secret must be in the same namespace as the runtime. - -1. Go to the runtime namespace with the NGINX ingress controller. -1. In `ingress-master`, add to `spec.tls`: - - ```yaml - tls: - - hosts: - - <host-name> - secretName: <secret-name> - ``` - -{::nomarkdown} -

                -{:/} - -### NGINX Community version ingress configuration - -Codefresh has been tested with and supports implementations of the major providers. For your convenience, we have provided configuration instructions, both for supported and untested providers in [Provider-specific configuration](#provider-specific-configuration). - - -This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. -* Verify valid external IP address -* Valid TLS certificate -* TCP support - -{::nomarkdown} -
                -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services, and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -Here's an example of TCP configuration for NGINX Community on AWS. -Verify that the `ingress-nginx-controller` service manifest has either of the following annotations: - -`service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"` -OR -`service.beta.kubernetes.io/aws-load-balancer-type: nlb` - -{::nomarkdown} -
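For reference, a minimal sketch of how one of these annotations might appear on the `ingress-nginx-controller` service is shown below. The port list and metadata are illustrative; your actual service contains whatever the ingress-nginx manifests define, and only the annotation is the relevant part.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx
  annotations:
    # Either annotation enables TCP passthrough on AWS; keep only the one you need.
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
    # service.beta.kubernetes.io/aws-load-balancer-type: nlb
spec:
  type: LoadBalancer
  ports:
  - name: https
    port: 443
    targetPort: https
    protocol: TCP
```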
                -{:/} - -#### Provider-specific configuration - -> The instructions are valid for `k8s.io/ingress-nginx`, the community version of NGINX. - -
                -AWS -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/aws/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for AWS. -
                -
                -Azure (AKS) -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for AKS. - -
                - -
                -Bare Metal Clusters -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -Bare-metal clusters often have additional considerations. See Bare-metal ingress-nginx considerations. - -
                - -
                -Digital Ocean -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/do/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Digital Ocean. - -
                - -
                -Docker Desktop -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Docker Desktop.
                -Note: By default, Docker Desktop services will provision with localhost as their external address. Triggers in delivery pipelines cannot reach this instance unless they originate from the same machine where Docker Desktop is being used. - -
                - -
                -Exoscale -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/exoscale/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Exoscale. - -
                - - -
                -Google (GKE) -
                -Add firewall rules -
                -GKE by default limits outbound requests from nodes. For the runtime to communicate with the control-plane in Codefresh, add a firewall-specific rule. - -
                  -
                1. Find your cluster's network:
                  - gcloud container clusters describe [CLUSTER_NAME] --format=get"(network)" -
                2. -
                3. Get the Cluster IPV4 CIDR:
                  - gcloud container clusters describe [CLUSTER_NAME] --format=get"(clusterIpv4Cidr)" -
                4. -
                5. Replace the `[CLUSTER_NAME]`, `[NETWORK]`, and `[CLUSTER_IPV4_CIDR]`, with the relevant values:
                  - gcloud compute firewall-rules create "[CLUSTER_NAME]-to-all-vms-on-network"
                  - - --network="[NETWORK]" \ -
                  - - --source-ranges="[CLUSTER_IPV4_CIDR]" \ -
                  - - --allow=tcp,udp,icmp,esp,ah,sctp -
                  -
                6. -
                -
                -Use ingress-nginx
                -
                  -
                1. Create a `cluster-admin` role binding:
                  - - kubectl create clusterrolebinding cluster-admin-binding \ -
                  - - --clusterrole cluster-admin \ -
                  - - --user $(gcloud config get-value account) -
                  -
                2. -
                3. Apply:
                  - - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml - -
                4. -
                5. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                6. - -
                -We recommend reviewing the provider-specific documentation for GKE. - -
                - - -
                -MicroK8s -
                  -
                1. Install using Microk8s addon system:
                  - microk8s enable ingress -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -MicroK8s has not been tested with Codefresh, and may require additional configuration. For details, see Ingress addon documentation. - -
                - - -
                -MiniKube -
                  -
                1. Install using MiniKube addon system:
                  - minikube addons enable ingress -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -MiniKube has not been tested with Codefresh, and may require additional configuration. For details, see Ingress addon documentation. - -
                - - - -
                -Oracle Cloud Infrastructure -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Oracle Cloud. - -
                - -
                -Scaleway -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/scw/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Scaleway. - -
                - -{::nomarkdown} -

                -{:/} - -### Traefik ingress configuration -For detailed configuration information, see [Traefik ingress controller documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress){:target="\_blank"}. - -The table below lists the specific configuration requirements for Codefresh. - -{: .table .table-bordered .table-hover} - -| What to configure | When to configure | -| -------------- | -------------- | -|Valid external IP address | _Before_ installing hybrid runtime | -|Valid TLS certificate | | -|TCP support | | -|Enable report status to cluster| | - -{::nomarkdown} -
                -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                -{:/} - -#### Enable report status to cluster -By default, the Traefik ingress controller is not configured to report its status to the cluster. If not configured, Argo’s health check reports the health status as “progressing”, resulting in a timeout error during installation. - -To enable reporting its status, add `publishedService` to `providers.kubernetesIngress.ingressEndpoint`. - -The value must be in the format `"<namespace>/<service-name>"`, where: - `<service-name>` is the Traefik service from which to copy the status - -```yaml -... -providers: - kubernetesIngress: - ingressEndpoint: - publishedService: "<namespace>/<service-name>" # Example, "codefresh/traefik-default" -... -``` - -{::nomarkdown} -
                -{:/} - -### What to read next -[Hybrid runtime installation flags]({{site.baseurl}}/docs/runtime/installation//#hybrid-runtime-installation-flags) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/) diff --git a/_docs/runtime/requirements_orig.md b/_docs/runtime/requirements_orig.md deleted file mode 100644 index 29fad0ee7..000000000 --- a/_docs/runtime/requirements_orig.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: "Hybrid runtime requirements" -description: "" -group: runtime -toc: true ---- - - -The requirements listed are the **_minimum_** requirements to provision **_hybrid runtimes_** in the Codefresh platform. - -> Hosted runtimes are managed by Codefresh. To provision a hosted runtime as part of Hosted GitOps setup, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - ->In the documentation, Kubernetes and K8s are used interchangeably. - -### Requirements - -{: .table .table-bordered .table-hover} -| Item | Requirement | -| -------------- | -------------- | -|Kubernetes cluster | Server version 1.18 and higher, without Argo Project components. Tip: To check the server version, run `kubectl version --short`.| -| Ingress controller| Configured on Kubernetes cluster and exposed from the cluster. {::nomarkdown}

                See XREF {:/}| -|Node requirements| {::nomarkdown}
                • Memory: 5000 MB
                • CPU: 2
                {:/}| -|Runtime namespace | resource permissions| -| | `ServiceAccount`: Create, Delete | -| | `ConfigMap`: Create, Update, Delete | -| | `Service`: Create, Update, Delete | -| | `Role`: In group `rbac.authorization.k8s.io`: Create, Update, Delete | -| |`RoleBinding`: In group `rbac.authorization.k8s.io`: Create, Update, Delete | -| | `persistentvolumeclaims`: Create, Update, Delete | -| | `pods`: Create, Update, Delete | -| Git providers | {::nomarkdown}
                • Hosted: GitHub
                • Hybrid:
                  • GitHub
                  • GitLab
                  • Bitbucket Server
                  • GitHub Enterprise
                | -| Git access tokens | {::nomarkdown}
                • Runtime Git token:
                  • Valid expiration date
                  • Scopes: `repo` and `admin-repo.hook`
                • Runtime Git token:
                  • Valid expiration date
                  • Scopes: `repo` and `admin-repo.hook`
                | - -### NGINX EN - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the EXTERNAL-IP column for your ingress controller shows a valid hostname. - -#### Valid SSL certificate -For secure runtime installation, the ingress controller must have a valid SSL certificate from an authorized CA (Certificate Authority). - -#### TCP support -Configure to handle TCP requests. - -Here's an example of TCP configuration for NGINX on AWS. -Verify that the ingress-nginx-controller service manifest has either of the following annotations: - -`service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"` -OR -`service.beta.kubernetes.io/aws-load-balancer-type: nlb` - - - -* AWS ALB - In the ingress resource file, verify that `spec.controller` is configured as `ingress.k8s.aws/alb`. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: alb -spec: - controller: ingress.k8s.aws/alb -``` - -* Report status - The ingress controller must be configured to report its status. Otherwise, Argo's health check reports the health status as "progressing" resulting in a timeout error during installation. - - By default, NGINX Enterprise and Traefik ingress are not configured to report status. For details on configuration settings, see the following sections in this article: - [NGINX Enterprise ingress configuration](#nginx-enterprise-version-ingress-configuration) - [Traefik ingress configuration](#traefik-ingress-configuration) - - -#### NGINX Enterprise version ingress configuration -The Enterprise version of NGINX (`nginx.org/ingress-controller`), both with and without the Ingress Operator, must be configured to report the status of the ingress controller. - -**Installation with NGINX Ingress** -* Pass the `- -report-ingress-status` to `deployment`. - - ```yaml - spec: - containers: - - args: - - -report-ingress-status - ``` - -**Installation with NGINX Ingress Operator** - -1. Add this to the `Nginxingresscontrollers` resource file: - - ```yaml - ... - spec: - reportIngressStatus: - enable: true - ... - ``` - -1. Make sure you have a certificate secret in the same namespace as the runtime. Copy an existing secret if you don't have one. -You will need to add this to the `ingress-master` when you have completed runtime installation. - -#### NGINX Community version provider-specific ingress configuration -Codefresh has been tested and is supported in major providers. For your convenience, here are provider-specific configuration instructions, both for supported and untested providers. - -> The instructions are valid for `k8s.io/ingress-nginx`, the community version of NGINX. - -
                -AWS -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/aws/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for AWS. -
                -
                -Azure (AKS) -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for AKS. - -
                - -
                -Bare Metal Clusters -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -Bare-metal clusters often have additional considerations. See Bare-metal ingress-nginx considerations. - -
                - -
                -Digital Ocean -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/do/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Digital Ocean. - -
                - -
                -Docker Desktop -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Docker Desktop.
                -Note: By default, Docker Desktop services will provision with localhost as their external address. Triggers in delivery pipelines cannot reach this instance unless they originate from the same machine where Docker Desktop is being used. - -
                - -
                -Exoscale -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/exoscale/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Exoscale. - -
                - - -
                -Google (GKE) -
                -Add firewall rules -
                -GKE by default limits outbound requests from nodes. For the runtime to communicate with the control-plane in Codefresh, add a firewall-specific rule. - -
                  -
                1. Find your cluster's network:
                  - gcloud container clusters describe [CLUSTER_NAME] --format=get"(network)" -
                2. -
                3. Get the Cluster IPV4 CIDR:
                  - gcloud container clusters describe [CLUSTER_NAME] --format=get"(clusterIpv4Cidr)" -
                4. -
                5. Replace the `[CLUSTER_NAME]`, `[NETWORK]`, and `[CLUSTER_IPV4_CIDR]`, with the relevant values:
                  - gcloud compute firewall-rules create "[CLUSTER_NAME]-to-all-vms-on-network"
                  - - --network="[NETWORK]" \ -
                  - - --source-ranges="[CLUSTER_IPV4_CIDR]" \ -
                  - - --allow=tcp,udp,icmp,esp,ah,sctp -
                  -
                6. -
                -
                -Use ingress-nginx
                -
                  -
                1. Create a `cluster-admin` role binding:
                  - - kubectl create clusterrolebinding cluster-admin-binding \ -
                  - - --clusterrole cluster-admin \ -
                  - - --user $(gcloud config get-value account) -
                  -
                2. -
                3. Apply:
                  - - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml - -
                4. -
                5. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                6. - -
                -We recommend reviewing the provider-specific documentation for GKE. - -
                - - -
                -MicroK8s -
                  -
                1. Install using Microk8s addon system:
                  - microk8s enable ingress -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -MicroK8s has not been tested with Codefresh, and may require additional configuration. For details, see Ingress addon documentation. - -
                - - -
                -MiniKube -
                  -
                1. Install using MiniKube addon system:
                  - minikube addons enable ingress -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -MiniKube has not been tested with Codefresh, and may require additional configuration. For details, see Ingress addon documentation. - -
                - - - -
                -Oracle Cloud Infrastructure -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Oracle Cloud. - -
                - -
                -Scaleway -
                  -
                1. Apply:
                  - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/scw/deploy.yaml -
                2. -
                3. Verify a valid external address exists:
                  - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                4. -
                -For additional configuration options, see ingress-nginx documentation for Scaleway. - -
                -
                - -#### Traefik ingress configuration -To enable the the Traefik ingress controller to report the status, add `publishedService` to `providers.kubernetesIngress.ingressEndpoint`. - -The value must be in the format `"/"`, where: - `` is the Traefik service from which to copy the status - - ```yaml - ... - providers: - kubernetesIngress: - ingressEndpoint: - publishedService: "/" # Example, "codefresh/traefik-default" ... - ... - ``` - -#### - -#### Runtime namespace permissions for resources - -{: .table .table-bordered .table-hover} -| Resource | Permissions Required| -| -------------- | -------------- | -| `ServiceAccount` | Create, Delete | -| `ConfigMap` | Create, Update, Delete | -| `Service` | Create, Update, Delete | -| `Role` | In group `rbac.authorization.k8s.io`: Create, Update, Delete | -| `RoleBinding` | In group `rbac.authorization.k8s.io`: Create, Update, Delete | -| `persistentvolumeclaims` | Create, Update, Delete | -| `pods` | Creat, Update, Delete | - -### Git repository requirements -This section lists the requirements for Git installation repositories. - -#### Git installation repo -If you are using an existing repo, make sure it is empty. - -#### Git access tokens -Codefresh requires two access tokens, one for runtime installation, and the second, a personal token for each user to authenticate Git-based actions in Codefresh. - -##### Git runtime token -The Git runtime token is mandatory for runtime installation. - -The token must have valid: - * Expiration date: Default is `30 days` - * Scopes: `repo` and `admin-repo.hook` - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - url="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - alt="Scopes for Git runtime token" - caption="Scopes for Git runtime token" - max-width="30%" - %} - -##### Git user token for Git-based actions -The Git user token is the user's personal token and is unique to every user. It is used to authenticate every Git-based action of the user in Codefresh. You can add the Git user token at any time from the UI. - - The token must have valid: - * Expiration date: Default is `30 days` - * Scope: `repo` - - {% include - image.html - lightbox="true" - file="/images/runtime/git-token-scope-resource-repos.png" - url="/images/runtime/git-token-scope-resource-repos.png" - alt="Scope for Git personal user token" - caption="Scope for Git personal user token" - max-width="30%" - %} - -For detailed information on GitHub tokens, see [Creating a personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token). - - -### What to read next -[Installing hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/) diff --git a/_docs/runtime/runtime-recovery.md b/_docs/runtime/runtime-recovery.md deleted file mode 100644 index a645ee625..000000000 --- a/_docs/runtime/runtime-recovery.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "Restore runtimes" -description: "" -group: runtime -toc: true ---- - -In case of cluster failure, restore the runtime from the existing runtime installation repository. For partial or complete cluster failures, you can restore the runtime to either the failed cluster or to a different cluster. Restoring the runtime reinstalls the runtime leveraging the resources in the existing runtime repo. 
- -Restoring the runtime: -* Applies `argo-cd` from the installation manifests in your repo to your cluster -* Associates `argo-cd` with the existing installation repo -* Applies the runtime and `argo-cd` secrets to the cluster -* Updates the runtime config map (`<runtime-name>.yaml` in the `bootstrap` directory) with the new cluster configuration for these fields: - `cluster` - `ingressClassName` - `ingressController` - `ingressHost` - - -### How to restore a runtime -Reinstall the runtime from the existing installation repository to restore it to the same or a different cluster. - -**Before you begin** - -* Have the following information handy: - > All values must be identical to the runtime to be restored. - * Runtime name - * Repository URL - * Codefresh context - * Kube context: Required if you are restoring to the same cluster - -**How to** - -1. Run: - `cf runtime install --from-repo` -1. Provide the relevant values when prompted. -1. If you are performing runtime recovery in a different cluster, verify the ingress resource configuration for `app-proxy`, `workflows`, and `default-git-source`. - If the health status remains as `Progressing`, do the following: - - * In the runtime installation repo, check if the `ingress.yaml` files for the `app-proxy` and `workflows` are configured with the correct `host` and `ingressClassName`: - - `apps/app-proxy/overlays/<runtime-name>/ingress.yaml` - `apps/workflows/overlays/<runtime-name>/ingress.yaml` - - * In the Git Source repository, check the `host` and `ingressClassName` in `cdp-default-git-source.ingress.yaml`: - - `resources_<runtime-name>/cdp-default-git-source.ingress.yaml` - - See the [example](#ingress-example) below. - -{:start="4"} -1. If you have managed clusters registered to the runtime you are restoring, reconnect them. - Run the command and follow the instructions in the wizard: - `cf cluster add` - -1. Verify that you have a registered Git integration: - `cf integration git list --runtime <runtime-name>` - -1. If needed, create a new Git integration: - `cf integration git add default --runtime <runtime-name> --provider github --api-url https://api.github.com` - - - -### Ingress example -This is an example of the `ingress.yaml` for `workflows`.
- - ```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - ingress.kubernetes.io/protocol: https - ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/backend-protocol: https - nginx.ingress.kubernetes.io/rewrite-target: /$2 - creationTimestamp: null - name: runtime-name-workflows-ingress - namespace: runtime-name -spec: - ingressClassName: nginx - rules: - - host: your-ingress-host.com - http: - paths: - - backend: - service: - name: argo-server - port: - number: 2746 - path: /workflows(/|$)(.*) - pathType: ImplementationSpecific -status: - loadBalancer: {} -``` - -### What to read next -[Manage runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Manage Git Sources]({{site.baseurl}}/docs/runtime/git-sources/) -[Managed clusters]({{site.baseurl}}/docs/runtime/managed-cluster/) \ No newline at end of file diff --git a/_docs/administration/single-sign-on/sso-ldap.md b/_docs/single-sign-on/ldap.md similarity index 97% rename from _docs/administration/single-sign-on/sso-ldap.md rename to _docs/single-sign-on/ldap.md index c3d4369ee..e0a112326 100644 --- a/_docs/administration/single-sign-on/sso-ldap.md +++ b/_docs/single-sign-on/ldap.md @@ -36,7 +36,7 @@ Make sure also that you know the scope of the search, that is, where users to se * **Certificate**: The security certificate of the LDAP server for `ldaps` only. Paste the value directly on the field. Do not convert to base64 or any other encoding by hand. Leave the field empty if you use `ldap`. {:start="4"} -1. Select **+ Add**. LDAP users can log in to Codefresh. +1. Select **+ Add**. LDAP users can now log in to Codefresh. >Each user who logs in to Codefresh must: 1. Have a defined email address on the LDAP server diff --git a/_docs/single-sign-on/oidc.md b/_docs/single-sign-on/oidc.md new file mode 100644 index 000000000..79a86e816 --- /dev/null +++ b/_docs/single-sign-on/oidc.md @@ -0,0 +1,66 @@ +--- +title: "Setting up OIDC Federated SSO" +description: "OpenID Connect (OIDC) Single Sign-On (SSO) setup" +group: single-sign-on +toc: true +--- + +Codefresh natively supports login using GitHub, Bitbucket and GitLab using the OpenID Connect (OAuth2) protocol. + + +## Prerequisites + +To successfully add an identity provider (IdP) in Codefresh, you need to do some preparatory work with both Codefresh and the provider: + +1. Inform your IdP that it will provide SSO services to Codefresh +1. Set up Codefresh and point it to your IdP. + +The first procedure differs according to your IdP, but the second one is common to all providers. + +>SSO is only available to Enterprise customers. Please [contact sales](https://codefresh.io/contact-sales/){:target="\_blank"} in order to enable it for your Codefresh account. + +## OIDC SSO configuration in Codefresh + +Here's what you need to do to configure SSO via OIDC in Codefresh: + +1. Configure SSO settings for the IdP: + This generally includes defining settings both in Codefresh and in the IdP. + Codefresh supports OIDC SSO for the following: + * [Auth0]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-auth0/) + * [Azure]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-azure/) + * [Google]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-google/) + * [Okta]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-okta/) + * [OneLogin]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-onelogin/) + +1. Test integration with the IdP: + + >Before enabling SSO for users in Codefresh, you **MUST** make sure that it is working for the test user. 
+ When SSO is enabled for a user, Codefresh allows login only through the SSO for the user and blocks logins through other IdPs. If the selected SSO method does not work for some reason, the is locked out of Codefresh. + + 1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. + 1. In the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. + 1. Add an active user to be used for testing. We recommend you use your own user. + 1. From the **SSO** dropdown, select the provider you want to test. + 1. Keep the current browser session open, and log in via Corporate SSO in an incognito tab (or another browser). + + {% include image.html + lightbox="true" + file="/images/administration/sso/sign-with-sso.png" + url="/images/administration/sso/sign-with-sso.png" + alt="Sign-in with SSO" + caption="Sign-in with SSO" + max-width="50%" + %} + +{:start="2"} +1. (Optional) [Set an IdP as the default provider]({{site.baseurl}}/docs/single-sign-on/team-sync/#set-a-default-sso-provider-for-account) + You can select an IdP as the default SSO provider for a Codefresh account. This means that all the new users added to that account will automatically use the selected IdP for signin. +1. (Optional) [Set the SSO method for each user]({{site.baseurl}}/docs/single-sign-on/team-sync/#select-sso-method-for-individual-users) + You can also select if needed, a different SSO provider for every user or for specific users. + +> Codefresh has an internal cache for SSO configuration, and it can take up to five minutes for your changes to take effect. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) + diff --git a/_docs/administration/single-sign-on/sso-auth0.md b/_docs/single-sign-on/oidc/oidc-auth0.md similarity index 79% rename from _docs/administration/single-sign-on/sso-auth0.md rename to _docs/single-sign-on/oidc/oidc-auth0.md index 5153a3d33..4c052c347 100644 --- a/_docs/administration/single-sign-on/sso-auth0.md +++ b/_docs/single-sign-on/oidc/oidc-auth0.md @@ -1,18 +1,23 @@ --- title: "Auth0" -description: "Set Up Auth0 Single Sign-On (SSO)" -group: administration -sub_group: single-sign-on +description: "Set up Auth0 Single Sign-On (SSO)" +group: single-sign-on +sub_group: oidc redirect_from: - /docs/enterprise/sso-auth0/ - /docs/enterprise/single-sign-on/sso-auth0/ toc: true --- +Set up SSO for Auth0 using OIDC. Auth0 is one of the SSO providers that Codefresh supports for authentication and authorization. -Create an SSO account for Auth0 in Codefresh. Start by creating an Auth0 application, then create the SSO account for Auth0 in Codefresh, and finally define the SSO settings for the application you created in Auth0. -### 1. Create an Auth0 application +Set up OIDC SSO for Auth0 in Codefresh by: +1. Creating an Auth0 application in Auth0 +1. Creating the SSO account for Auth0 in Codefresh +1. Definng the SSO settings for the application you created in Auth0 + +## Step 1: Create an Auth0 application First create a new application in Auth0. 1. Log in to Auth0. @@ -47,12 +52,12 @@ max-width="40%" %} {:start="5"} -1. Continue with _Create SSO account for Auth0 in Codefresh_. +1. Continue with [Step 2: Create SSO account for Auth0 in Codefresh](#step-2-create-sso-account-for-auth0-in-codefresh). -### 2. 
Create SSO account for Auth0 in Codefresh +## Step 2: Create SSO account for Auth0 in Codefresh After creating an Auth0 application, create an SSO account for OAuth0 in Codefresh. -1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on). +1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. 1. Click **Add Single Sign-On**. 1. For the Single Sign-On Service, select **Auth0**, and click **Next**. 1. Define the connection details: @@ -75,10 +80,10 @@ max-width="40%" {:start="5"} 1. Click **Save**. 1. Copy the Client Name that is assigned to identify this SSO account. You will have to add it to the Auth0 application. -1. Continue with _Define SSO settings in Auth0 application_. +1. Continue with [Step 3: Define SSO settings in Auth0 application](#step-3-define-sso-settings-in-auth0-application). -### 3. Define SSO settings in Auth0 application +## Step 3: Define SSO settings in Auth0 application As the final step in Auth0 SSO setup, return to Auth0, and then define the Login URI and Callback URL for the Auth0 application you created in 1. 1. From the sidebar, select **Applications > Applications**. @@ -101,8 +106,9 @@ max-width="50%" You have completed SSO setup for Auth0 in Codefresh. -### Related articles +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) -[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/administration/single-sign-on/) diff --git a/_docs/single-sign-on/oidc/oidc-azure.md b/_docs/single-sign-on/oidc/oidc-azure.md new file mode 100644 index 000000000..54df28d48 --- /dev/null +++ b/_docs/single-sign-on/oidc/oidc-azure.md @@ -0,0 +1,259 @@ +--- +title: "Azure Single Sign-On (SSO)" +description: " " +group: single-sign-on +sub_group: oidc +redirect_from: + - /docs/enterprise/single-sign-on/sso-azure/ +toc: true +--- + +Set up SSO for Azure using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). + +Set up OIDC SSO for Azure in Codefresh by: +1. Registering the Codefresh application in Azure +1. Configuring permissions for the Codefresh application in Azure +1. Creating the Client secret in Azure +1. Completing SSO configuration for Azure in Codefresh +1. Configuring redirect URIs in Azure + + +## Prerequisites + +Make sure that your user in Azure who creates the application is assigned either of these roles: +**Application Administrator** +OR +**Global Administrator** + +If the user who creates the Azure application is not assigned to either of these roles, you will be unable to sync teams from Azure to Codefresh. + + +## Step 1: Register the Codefresh application in Azure + +To setup Azure Active Directory for SSO, first register a new application in Azure. + +1. Log in to the **Azure Portal**, and from the sidebar, select **Azure Active Directory**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/register-app-select-azure-ad.png" +url="/images/sso/azure/register-app-select-azure-ad.png" +alt="Azure Active Directory" +caption="Azure Active Directory" +max-width="70%" +%} + +{:start="2"} +1. From the sidebar, select **App registrations**, and then click **+ New registration**. +1. Enter a name for the application, for example, `Codefresh`, and retain the default values for all other settings. 
+ +{% include image.html +lightbox="true" +file="/images/sso/azure/register-app-name.png" +url="/images/sso/azure/register-app-name.png" +alt="Enter name and register application" +caption="Enter name and register application" +max-width="70%" +%} + +{:start="4"} +1. To apply your changes, click **Register**. The application is created and registered in Azure AD. +1. Continue with [Step 2: Configure permissions for the application in Azure](#step-2-configure-permissions-for-the-application-in-azure). + + +## Step 2: Configure permissions for the application in Azure + +Once the application has been created and registered, configure the required permissions. + +1. Click the name of the application to open **Settings**. +1. Do the following: + * Select **API permissions**, and then click **+ Add a permission**. + * From **Request API Permissions**, select **Microsoft APIs**, and then select **Microsoft Graph**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/config-app-permissions-microsoft-graph.png" +url="/images/sso/azure/config-app-permissions-microsoft-graph.png" +alt="Select Microsoft Graph" +caption="Select Microsoft Graph" +max-width="70%" +%} + +{:start="3"} +1. Click **Application permissions** on the left, and select `Group > Read.All`. + +> The `User.Read.All (Delegated)` permission is added by default. + +{% include image.html +lightbox="true" +file="/images/sso/azure/config-app-permissions-selected.png" +url="/images/sso/azure/config-app-permissions-selected.png" +alt="`Group > Read.All` permissions for Microsoft Graph" +caption="`Group > Read.All` permissions for Microsoft Graph" +max-width="70%" +%} + +{:start="4"} +1. Click **Add Permissions**. +1. Click **Grant admin consent for Default Directory** on the bar. + +{% include image.html +lightbox="true" +file="/images/sso/azure/config-app-permissions-added.png" +url="/images/sso/azure/config-app-permissions-added.png" +alt="Grant admin consent for Default Directory" +caption="Grant admin consent for Default Directory" +max-width="70%" +%} + +{:start="6"} +1. Continue with [Step 3: Create client secret in Azure](#step-3-create-client-secret-in-azure). + + +## Step 3: Create client secret in Azure + +Create a client secret for the application. You will need to provide it when you set up SSO for Azure in Codefresh. + +1. From the sidebar, select **Certificates & secrets**, and then click **+ New client secret**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/client-secret-select-option.png" +url="/images/sso/azure/client-secret-select-option.png" +alt="Create client secret" +caption="Create client secret" +max-width="70%" +%} + +{:start="2"} +1. Optional. Add a meaningful description for the client secret, and either retain the default expiry date or define a custom one. + +{% include image.html +lightbox="true" +file="/images/sso/azure/client-secret-add-description.png" +url="/images/sso/azure/client-secret-add-description.png" +alt="Description for client secret" +caption="Description for client secret" +max-width="70%" +%} + +> Tip: Make a note of the expiry date in your calendar to renew the key before the expiry date and prevent service interruptions. + +{:start="3"} +1. Click **Add**. + **Copy the secret key**, as you will need to provide it on setting up Azure SSO in Codefresh. +1. Continue with [Step 4: Configure SSO settinggs for Azure in Codefresh](#step-4-configure-sso-settings-for-azure-in-codefresh). 
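Before moving to Step 4, it can help to collect the Azure AD values you will be asked for in one place. The sketch below is only an illustrative checklist with placeholder values (the IDs, secret, and tenant shown are not real and the key names are descriptive, not actual Codefresh field identifiers):

```yaml
# Values to collect from Azure AD before Step 4 (placeholders only)
application_id: "00000000-0000-0000-0000-000000000000"   # Enterprise Application > Properties > Application ID
object_id: "11111111-1111-1111-1111-111111111111"        # Enterprise Application > Properties > Object ID
client_secret: "<secret value copied when the client secret was created>"
tenant: "mycompany.onmicrosoft.com"                      # or the tenant ID
```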
+ +## Step 4: Configure SSO settings for Azure in Codefresh + +Configure SSO for Azure in the Codefresh UI. + +**Before you begin** +* From Azure AD: + * Have your client secret handy + * Go to the application you created, and note down these **Properties: Application ID and Object ID** + + {% include image.html +lightbox="true" +file="/images/sso/azure/azure-properties-object-app-ids.png" +url="/images/sso/azure/azure-properties-object-app-ids.png" +alt="Application and Object IDs in Azure" +caption="Application and Object IDs in Azure" +max-width="70%" +%} + + +**How to** + +1. In the Codefresh UI, select **Account Settings**, and then from the sidebar, select **Single Sign On**. +1. Click **Add Single Sign-On**, and select **Azure AD**. +1. Enter the following: + * **Client Name**: For auto-generation, leave empty. Codefresh generates the client name once you save the settings. + * **Display Name**: Meaningful name for the SSO provider - Shown as display name in Azure (see below) + * **Access token** and **Application ID**: The Application ID from your Enterprise Application Properties in Azure AD. + * **Client Secret**: The key value you copied when you created the client secret in Azure. + * **Tenant**: `mycompany.onmicrosoft.com` or the ID of `0example1-0000-0aa0-a00a-1example0` + * **Object ID**: The Object ID from your Enterprise Application Properties in Azure AD. + * **Auto Sync users and teams to Codefresh**: Select to automatically sync user accounts in Azure AD to your Codefresh account. Optionally, define the time interval, in hours, at which to sync, from 1 to 24. If you don’t specify an interval, the sync is every 12 hours. + + {% include image.html +lightbox="true" +file="/images/sso/azure/sso-codefresh-settings.png" +url="/images/sso/azure/sso-codefresh-settings.png" +alt="SSO settings for Azure in Codefresh" +caption="SSO settings for Azure in Codefres" +max-width="70%" +%} + +{:start="4"} +1. Click **Save**. + If you left the Client Name empty, Codefresh generates one (as in the example below). Codefresh uses this name to identify the SSO configuration. + +{% include image.html +lightbox="true" +file="/images/sso/azure/sso-codefresh-generated-client-id.png" +url="/images/sso/azure/sso-codefresh-generated-client-id.png" +alt="Example of Codefresh-generated Client Name for Azure" +caption="Example of Codefresh-generated Client Name for Azure" +max-width="50%" +%} + + We will need this value in the reply URL setting (back in the Azure portal UI). +1. Continue with [Step 5: Configure redirect URIs in Azure](#step-5-configure-redirect-uris-in-azure). + + +## Step 5: Configure redirect URIs in Azure + +As the final step, add the Codefresh callback URL to the allowed reply URLs for the created application in Azure. + +**Before you begin** +* Make sure you have the Client Name for the Azure SSO configuration from Codefresh + + +**How to** + +1. Go to **Azure Active Directory > Apps registrations**, and select the application you registered for SSO. +1. From the sidebar, select **Authentication**. +1. Below **Platform Configuration**, click **Add a platform** and then select **Web**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/redirect-uri-web-configure.png" +url="/images/sso/azure/redirect-uri-web-configure.png" +alt="Select Web configuration settings" +caption="Select Web configuration settings" +max-width="70%" +%} + +{:start="4"} +1. 
In the Configure Web form, do the following: + * In the **Redirect URIs** field, enter the redirect URI in the format below: + `https://g.codefresh.io/api/auth//callback` + where: + `` is the Client Name shown in the SSO configuration, either defined by you or created by Codefresh. + * Select **ID tokens**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/redirect-rui-define-select-id-tokens.png" +url="/images/sso/azure/redirect-rui-define-select-id-tokens.png" +alt="Web configuration settings" +caption="Web configuration settings" +max-width="70%" +%} + +You have now completed the SSO setup for Azure using OIDC. + +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) diff --git a/_docs/administration/single-sign-on/sso-google.md b/_docs/single-sign-on/oidc/oidc-google.md similarity index 70% rename from _docs/administration/single-sign-on/sso-google.md rename to _docs/single-sign-on/oidc/oidc-google.md index e6d6a6a90..e69e94936 100644 --- a/_docs/administration/single-sign-on/sso-google.md +++ b/_docs/single-sign-on/oidc/oidc-google.md @@ -1,18 +1,23 @@ --- title: "Google Single Sign-On (SSO)" description: "" -group: administration -sub_group: single-sign-on +group: single-sign-on +sub_group: oidc toc: true --- -Setting up SSO for Google in Codefresh requires you to create a client secret for Codefresh in Google, configure SSO settings in Codefresh and then define the redirect URIs, also in Google. -For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). +Set up SSO for Google using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). +Set up OIDC SSO for Google in Codefresh by: +1. Creating the client secret in Google +1. Configuring SSO settings for Google in Codefresh +1. Setting up the redirect URI in Google -### Create Client Secret -1. Log in to [https://console.developers.google.com/](https://console.developers.google.com/). +## Step 1: Create Client Secret in Google + +1. Log in to [https://console.developers.google.com/](https://console.developers.google.com/){:target="\_blank"}. 1. From the sidebar, select **Credentials**. 1. Select **Create Credentials**, and from the drop-down, select **OAuth client ID**. 1. Do the following: @@ -26,9 +31,9 @@ For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/ url="/images/administration/sso/google/googleSSO.png" alt="Creating an OAuth client" caption="Creating an OAuth client" - max-width="30%" + max-width="70%" %} - + * Select **Create**. * From the OAUth client created dialog, note down **Your Client ID** and **Your Client Secret**. 
@@ -38,13 +43,15 @@ For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/ url="/images/administration/sso/google/googleSSO2.png" alt="Getting the Client ID and secret" caption="Getting the Client ID and secret" - max-width="30%" + max-width="70%" %} -You will need the Client ID and secret to configure SSO for Google in Codefresh. + You will need the Client ID and secret to configure SSO for Google in Codefresh. +{:start="5"} +1. Continue with [Step 2: Configure SSO settings for Google in Codefresh](#step-2-configure-sso-settings-for-google-in-codefresh). -### Configure SSO for Google in Codefresh +## Step 2: Configure SSO settings for Google in Codefresh 1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. 1. Select **+ Add Single Sign-On**, **Google**, and then **Next**. @@ -80,8 +87,9 @@ You will need the Client ID and secret to configure SSO for Google in Codefresh. {:start="5"} 1. Note down the Client Name, as you need it to set the redirect URI in Google. +1. Continue with [Step 3: Set up Redirect URI in Google](#step-3-set-up-redirect-uri-in-google). -### Set up Redirect URI +### Step 3: Set up Redirect URI in Google 1. Go back to the Google Console Developer dashboard, and click the edit button on the OAuth 2.0 Client IDs that you created before. 1. For **Authorized Redirect URIs**, in the **URIs** field, enter the Client Name you noted down to generate the *Authorized Redirect URIs* * Example Client Name: `t0nlUJoqQlDv` @@ -96,9 +104,9 @@ You will need the Client ID and secret to configure SSO for Google in Codefresh. max-width="30%" %} -This concludes the basic SSO setup for Google. +You have now completed SSO setup for Google via OIDC. -### Synchronize teams via Codefresh CLI +## Synchronize teams via Codefresh CLI For team/group synchronization you also need a service account. In the Codefresh configuration screen there are some optional fields that you can fill, in order to @@ -138,7 +146,16 @@ get team synchronization via the Codefresh CLI. You need to create a service acc * `JSON Keyfile`: The contents of the JSON file * `Admin email`: The user `admin.google.com` -Now you can [synchronize teams using the Codefresh CLI]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. -#### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. 
\ No newline at end of file +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up OIDC Federated SSO]({{site.baseurl}}/docs/single-sign-on/oidc) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/administration/single-sign-on/sso-okta.md b/_docs/single-sign-on/oidc/oidc-okta.md similarity index 69% rename from _docs/administration/single-sign-on/sso-okta.md rename to _docs/single-sign-on/oidc/oidc-okta.md index fdaa0e6a4..8e649c2f1 100644 --- a/_docs/administration/single-sign-on/sso-okta.md +++ b/_docs/single-sign-on/oidc/oidc-okta.md @@ -1,20 +1,25 @@ --- title: "Okta Single Sign-On (SSO)" description: " " -group: administration -sub_group: single-sign-on +group: single-sign-on +sub_group: oidc redirect_from: - /docs/enterprise/single-sign-on/sso-okta/ toc: true --- +Set up SSO for Okta using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). -In this page we will see the process of setting up Okta SSO with Codefresh. For the general instructions of SSO setup -see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). -### Set up Okta as an Identity provider +Set up OIDC SSO for Okta in Codefresh by: +1. Setting up Okta as an IdP Codefreh in Okta +1. Configuring SSO settings for Okta in Codefresh +1. Configuring URIs in Okta + +## Step 1: Set up Okta as an identity provider 1. Log in to your Okta account, or create an Okta account if you don't have one. -1. On the general Okta dashboard, to open the Okta Admin Dashboard select **Admin**. +1. In the general Okta dashboard, to open the Okta Admin Dashboard, select **Admin**. {% include image.html lightbox="true" @@ -22,7 +27,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image5.png" alt="Okta Dashboard" caption="Okta Dashboard" - max-width="30%" + max-width="70%" %} {:start="3"} @@ -34,7 +39,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image8.png" alt="Okta Applications" caption="Okta Applications" - max-width="30%" + max-width="70%" %} {:start="4"} @@ -46,7 +51,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image9.png" alt="Create new application" caption="Create new application" - max-width="30%" + max-width="70%" %} {:start="5"} @@ -55,21 +60,23 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- * For the **Sign on method**, select **OpenID Connect**. * Select **Create**. - {% include image.html + {% include image.html lightbox="true" file="/images/administration/sso/okta/image1.png" url="/images/administration/sso/okta/image1.png" alt="Choose Sign-on method" caption="Choose Sign-on method" - max-width="30%" + max-width="70%" %} {:start="6"} 1. Configure OIDC integration in **General Settings**: * App name (e.g. Codefresh). * App logo (optional). Feel free to download and add this [picture]({{site.baseurl}}/images/administration/sso/okta/codefresh-logo.png). - * Login redirect URI: `https://g.codefresh.io/api/auth//callback` - where is generated by Codefresh when you configure SSO settings. For now, use a temp value such as `https://g.codefresh.io/api/auth/temp/callback`. 
+ * Login redirect URI: `https://g.codefresh.io/api/auth//callback` + where: + is generated by Codefresh when you configure SSO settings. + For now, use a temp value such as `https://g.codefresh.io/api/auth/temp/callback`. {% include image.html lightbox="true" @@ -77,12 +84,12 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image4.png" alt="OpenID integration" caption="OpenID integration" - max-width="30%" + max-width="70%" %} * Select **Save**. -### Okta settings needed for SSO in Codefresh -To configure SSO settings for Okta in Codefresh, you meed the Client ID, Client Secret, Access token, and the Codefresh application ID as defined in Okta. +## Configure OIDC SSO settings for Okta in Codefresh +To configure OIDC SSO settings for Okta in Codefresh, you need the Client ID, Client Secret, Access token, and the Codefresh application ID as defined in Okta. Copy the values from the following screens: {% include image.html @@ -91,7 +98,7 @@ file="/images/administration/sso/okta/image7.png" url="/images/administration/sso/okta/image7.png" alt="Client ID and secret" caption="Client ID and secret" -max-width="30%" +max-width="70%" %} {% include image.html @@ -100,7 +107,7 @@ file="/images/administration/sso/okta/image2.png" url="/images/administration/sso/okta/image2.png" alt="Access token" caption="Access token" -max-width="30%" +max-width="70%" %} {% include image.html @@ -109,7 +116,7 @@ file="/images/administration/sso/okta/image3.png" url="/images/administration/sso/okta/image3.png" alt="App ID" caption="App ID" -max-width="30%" +max-width="70%" %} ### Configure SSO for Okta in Codefresh @@ -136,8 +143,12 @@ max-width="30%" Do not copy the URL from the admin view (e.g. `https://-admin.okta.com`), as it will not work. * **Access Token**: Optional. The OKTA API token used to sync groups and their users from OKTA to Codefresh. The token can be generated in OKTA by going to the security tab->API (see above). * **Application ID**: The Codefresh application ID in your OKTA organization, that will be used to sync groups and user from OKTA to Codefresh. This ID can be taken by navigating to your Codefresh APP in OKTA and copy it from the URL (see above). -1. Optional. To automatically sync teams or groups in Okta to Codefresh, set **Auto group sync** to **ON**. This action syncs groups every 12 hours. -1. Select **+Add**. Codefresh automatically generates the **Client Name** to which to identify the SSO configuration. +1. Optional. To automatically sync teams or groups in Okta to Codefresh, set **Auto group sync** to **ON**. + This action syncs groups every 12 hours. + > Though you can assign an Okta application to both groups and individual users, Codefresh _only syncs users who are part of teams_. + New users in Okta, _not_ assigned to a team, are **NOT** synced with Codefresh. You should first assign the user to a team for the sync to work. +1. Select **+Add**. + Codefresh automatically generates the **Client Name** to which to identify the SSO configuration. Note it down. {% include image.html @@ -146,27 +157,26 @@ max-width="30%" url="/images/administration/sso/okta/image6.png" alt="Client name" caption="Client name" - max-width="30%" + max-width="70%" %} -### Configure URIs in Okta +## Configure URIs in Okta 1. 
In the Okta application, go to **General Settings**, and update the following with the client name generated by Codefresh: - * Login redirect URIs - `https://g.codefresh.io/api/auth//callback` - * Initiate login URI - `https://g.codefresh.io/api/auth/` + * Login redirect URIs: `https://g.codefresh.io/api/auth//callback` + * Initiate login URI: `https://g.codefresh.io/api/auth/` You have now completed SSO setup for Okta. -### How Okta syncing works -[Syncing with Okta]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) +## How Okta syncing works +[Syncing with Okta]({{site.baseurl}}/docs/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) only affects teams/groups, and not individual users. -Codefresh only syncs users who are part of teams, though you can assign an Okta application to both groups and individual users. -New users in Okta, _not_ assigned to a team, are **NOT** synced with Codefresh. You should first assign the user to a team for the sync to work. + ### Sync teams after initial SSO setup There are two ways to set up automatic syncing of teams: -* Pipeline running a CLI command: Create a Codefresh pipeline the runs the CLI command `codefresh synchronize teams my-okta-client-name -t okta` as explained in the [pipeline sync page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). +* Pipeline running a CLI command: Create a Codefresh pipeline the runs the CLI command `codefresh synchronize teams my-okta-client-name -t okta` as explained in the [pipeline sync page]({{site.baseurl}}/docs/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). * Turn on the auto-sync toggle as part of the SSO configuration settings.: {% include image.html lightbox="true" @@ -177,6 +187,7 @@ There are two ways to set up automatic syncing of teams: max-width="50%" %} -### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) diff --git a/_docs/administration/single-sign-on/sso-onelogin.md b/_docs/single-sign-on/oidc/oidc-onelogin.md similarity index 63% rename from _docs/administration/single-sign-on/sso-onelogin.md rename to _docs/single-sign-on/oidc/oidc-onelogin.md index 3e6213f80..bb66f2682 100644 --- a/_docs/administration/single-sign-on/sso-onelogin.md +++ b/_docs/single-sign-on/oidc/oidc-onelogin.md @@ -1,23 +1,25 @@ --- title: "OneLogin Single Sign-On (SSO)" description: " " -group: administration -sub_group: single-sign-on +group: single-sign-on +sub_group: oidc redirect_from: - /docs/enterprise/single-sign-on/sso-onelogin/ toc: true --- -In this page we will see the process of setting up OneLogin SSO with Codefresh. For the general instructions of SSO setup -see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). +Set up SSO for OneLogin using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). +Set up OIDC SSO for OneLogin in Codefresh by: +1. Setting up OneLogin as an IdP +1. Configuring SSO settings for OneLogin in Codefresh +1. 
Configuring URIs in Okta -## Set up OneLogin as an Identity provider +## Step 1: Set up OneLogin as an identity provider for Codefresh +Configure the application in the OneLogin dashboard. - -1. Configure app on the OneLogin dashboard: - {:start="1"} - 1. Log in to the [OneLogin Administration Dashboard](https://www.onelogin.com/), and select **Apps > Add Apps**. +1. Log in to the [OneLogin Administration Dashboard](https://www.onelogin.com/){:target="\_blank"}, and select **Apps > Add Apps**. {% include image.html lightbox="true" @@ -25,10 +27,11 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step1.png" alt="OneLogin Dashboard" caption="OneLogin Dashboard" - max-width="30%" + max-width="50%" %} - {:start="2"} - 1. Find **OpenId Connect (OIDC)** app using the search field. + +{:start="2"} +1. Find **OpenId Connect (OIDC)** app using the search field. {% include image.html lightbox="true" @@ -36,10 +39,11 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step2.png" alt="Locating the OpenId Connect App" caption="Locating the OpenId Connect App" - max-width="30%" + max-width="50%" %} - {:start="3"} - 1. Setup a Codefresh application. + +{:start="3"} +1. Set up a Codefresh application. {% include image.html lightbox="true" @@ -47,10 +51,11 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step3.png" alt="Adding a new application" caption="Adding a new application" - max-width="30%" + max-width="50%" %} - {:start="4"} - 1. From the sidebar, select **SSO** and copy the **Client ID** and the **Client Secret**. + +{:start="4"} +1. From the sidebar, select **SSO** and copy the **Client ID** and the **Client Secret**. Set **Application Type** to **Web**, and **Token endpoint Authentication** to **POST**. {% include image.html @@ -59,10 +64,13 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step4-post.png" alt="Copying the values of Client ID and Secret" caption="Copying the values of Client ID and Secret" - max-width="30%" + max-width="50%" %} -### Configure SSO for OneLogin in Codefresh +{:start="5"} +1. Continue with [Step 2: Configure SSO for OneLogin in Codefresh](#step-2-configure-sso-for-onelogin-in-codefresh). + +## Step 2: Configure SSO for OneLogin in Codefresh 1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. 1. Select **+ Add Single Sign-On** and then select **OneLogin**. @@ -73,7 +81,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/sso-csdp-onelogin.png" alt="SSO settings for OneLogin in Codefresh" caption="SSO settings for OneLogin in Codefresh" - max-width="30%" + max-width="50%" %} {:start="2"} @@ -83,7 +91,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- * **Client ID**: The Client ID you copied from OneLogin. * **Client Secret**: The Client Secret you copied from OneLogin. * **Domain**: Optional. The domain to be used for authentication, only for users who must connect via a custom domain. - * **API CLIENT ID** and **API CLIENT SECRET**: Used for [team sync]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) only. 
For details, see the [official documentation](https://developers.onelogin.com/api-docs/1/getting-started/working-with-api-credentials). + * **API CLIENT ID** and **API CLIENT SECRET**: Used for [team sync]({{site.baseurl}}/docs/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) only. For details, see the [official documentation](https://developers.onelogin.com/api-docs/1/getting-started/working-with-api-credentials){:target="\_blank"}. {:start="3"} 1. Select **+ Add**. Codefresh generates the client name. Note this down. @@ -97,9 +105,10 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- max-width="100%" %} - +{:start="4"} +1. Continue with [Step 3: Set up login and redirect URIs in OneLogin](#step-3-set-up-login-and-redirect-uris-in-onelogin). -### Set up login and redirect URIs +## Step 3: Set up login and redirect URIs in OneLogin Go back to the OneLogin dashboard. @@ -115,10 +124,11 @@ file="/images/administration/sso/onelogin/step8.png" url="/images/administration/sso/onelogin/step8.png" alt="Login and Redirect URI" caption="Login and Redirect URI" -max-width="30%" +max-width="50%" %} -You have now completed SSO setup for OneLogin. +You have now completed SSO setup for OneLogin via OIDC. -#### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. \ No newline at end of file +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/single-sign-on/saml-setup.md b/_docs/single-sign-on/saml-setup.md new file mode 100644 index 000000000..afd68dbe5 --- /dev/null +++ b/_docs/single-sign-on/saml-setup.md @@ -0,0 +1,141 @@ +--- +title: "Setting up SAML2 Federated SSO" +description: "SAML2 Federated Single Sign-On (SSO) setup" +group: single-sign-on +redirect_from: + - /docs/sso/sso-setup-saml2/ + - /docs/enterprise/single-sign-on/sso-setup-saml2/ +toc: true +--- + +As Identity Providers (IdPs) come in all shapes and sizes, this topic discusses in general what you must do to configure Federated SSO for SAML. + As you will see in the description below, the person in your organization responsible for managing your IdP will need to interact with Codefresh support to successfully set up a trust between your IdP and Codefresh as an SP. + +{:.text-secondary} +## Before you set up Federated SSO + 1. Have your account set up with Codefresh enterprise plan. + 2. Ensure you have a working SAML 2.0 compliant identity provider (IdP). + 3. Identify someone in your organization who is familiar with configuring and managing your organization's IdP. + 4. Ensure that your IdP's system clock is synchronized with a reliable time source. If it's not, tokens generated will be unusable and SSO will fail. 
+ +{:.text-secondary} +### Summary of Federated SSO setup + +{% include image.html + lightbox="true" + file="/images/sso-flow.png" + url="/images/sso-flow.png" + alt="sso-flow.png" + max-width="100%" +%} + +{:.text-secondary} +### SAML attributes + +Codefresh expects the following user attributes to be passed through SAML between your IdP and Codefresh SP: + - User email address + - User first name + - User last name + - User full name + - User unique ID that isn't subject to change in your identity management environment + +{:.text-secondary} +### How does the connection process work? + + {% include image.html +lightbox="true" +file="/images/sso-diagram.png" +url="/images/sso-diagram.png" +alt="sso-diagram.png" +max-width="100%" + %} + +Once Federated SSO has been configured, the process works as follows: + +
                + + Steps 2 to 7 occur in the background and are transparent to the user. +
                + +1. A user logs in to Codefresh and enters the email address. +2. The user is redirected to the Codefresh Service Provider (SP) to initiate SSO. +3. The user’s browser is then redirected to the customer IdP. +4. Once authenticated by the corporate side, a SAML token is sent to the user’s browser. +5. The SAML assertion is then forwarded to Codefresh SP. +6. If you are a valid Codefresh user for this SSO connection, an SSO token is returned to the user’s browser. +7. The user’s browser then returns a token to Codefresh and access is granted for your account. + +## SAML SSO configuration in Codefresh + +Here's what you need to do to configure SSO via SAML in Codefresh: + +1. Configure SSO settings for the IdP in Codefresh: + This generally includes defining settings in both in Codefresh and in the IdP. + Codefresh supports SAML SSO for the following: + * [JumpCloud]({{site.baseurl}}/docs/single-sign-on/saml/saml-jumpcloud) + * [Okta]({{site.baseurl}}/docs/single-sign-on/saml/saml-okta) + * [OneLogin]({{site.baseurl}}/docs/single-sign-on/saml/saml-onelogin) + * [PingID](({{site.baseurl}}/docs/single-sign-on/saml/saml-pingid) + + Notes for SSO via SAML: + **SSO settings** + + * Assertion URL + The Assertion URL which is the Service Provider SSO endpoint, also referred to as the Callback URL or Client ID, is generated _after_ you create the integration. + + * Provider + Currently, we support GSuite for SAML. If you are using a different provider, leave this field empty. + For GSuite, you can define the sync settings, Admin Email and the JSON Keyfile. + For instructions, see [Google SSO]({{site.baseurl}}/docs/single-sign-on/sso-google/#synchronize-teams-with-the-codefresh-cli). + + > These settings are for the SaaS version of Codefresh. For an on-premises setup, use the URLs that match your installation. + +1. Test integration with the IdP + + >Before enabling SSO for users, you **MUST** make sure that it is working for the test user. If SSO is enabled for a user, Codefresh blocks logins through other IDPs for this user and only the enabled SSO is allowed. If the selected SSO method does not work for some reason, the user will be locked out of Codefresh. + + 1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. + 1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. + 1. Add an active user to be used for testing. We recommend you use your own user. + 1. Change Login method by selecting your Auth provider from the SSO drop-down. + + {% include image.html + lightbox="true" + file="/images/administration/sso/collaborators.png" + url="/images/administration/sso/collaborators.png" + alt="Adding collaborators" + caption="Adding collaborators" + max-width="70%" + %} + + {:start="5"} + 1. Keep the current browser session open, and log in via Corporate SSO in an incognito tab (or another browser). + + {% include image.html + lightbox="true" + file="/images/administration/sso/sign-with-sso.png" + url="/images/administration/sso/sign-with-sso.png" + alt="Sign-in with SSO" + caption="Sign-in with SSO" + max-width="50%" + %} + +1. (Optional) [Set an IdP as the default provider]({{site.baseurl}}/docs/single-sign-on/team-sync/#set-a-default-sso-provider-for-account) + You can select an IdP as the default SSO provider for a Codefresh account. 
This means that all the new users added to that account will automatically use the selected IdP for signin. +1. (Optional) [Set the SSO method for each user]({{site.baseurl}}/docs/single-sign-on/team-sync/#select-sso-method-for-individual-users) + You can also select if needed, a different SSO provider for every user or for specific users. + +> Codefresh has an internal cache for SSO configuration, and it can take up to five minutes for your changes to take effect. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on) + + + + + + + + + + diff --git a/_docs/single-sign-on/saml/saml-jumpcloud.md b/_docs/single-sign-on/saml/saml-jumpcloud.md new file mode 100644 index 000000000..bd7d2f735 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-jumpcloud.md @@ -0,0 +1,84 @@ +--- +title: JumpCloud SSO via SAML +description: Set up JumpCloud via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for JumpCloud using SAML. +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Set up SAML SSO for JumpCloud by: +1. Configuring SSO settings for JumpCloud via SAML in Codefresh +1. Configuring SSO settings for Codefresh in JumpCloud +1. Completing SSO configuration for JumpCloud in Codefresh + +## Step 1: Configure SSO settings for JumpCloud via SAML in Codefresh + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. In the sidebar, from Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any arbitrary name for this integration. + * **IDP Entry**: Type in any character. You will enter the correct value from JumpCloud in the final step. + * **Application Certificate**: You will enter the correct value from JumpCloud in the final step. +1. If GSuite is your provider, select it as the **Provider**, and define the settings below. Otherwise leave the field empty. + * **Admin Email**: The email of the user with access to `admin.google.com`. + * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. + * **Sync interval**: Optional. The time interval at which to sync. + * **Sync Field**: Optional. TBD + * **JSON Keyfile**: . TBD +1. Click **Add**. + The SAML integration for JumpCloud is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the JumpCloud SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 2: Configure SSO settings for Codefresh in JumpCloud](#step-2-configure-sso-settings-for-codefresh-in-jumpcloud). + +## Step 2: Configure SSO settings for Codefresh in JumpCloud + + +1. In JumpCloud, go to **User Authentication > SSO**. +1. To configure Codefresh as a new application, click **+**. +1. Select **Custom SAML**. +1. Add a **Display Label** for the application you will create. +1. 
Click the **SSO** tab, and enter the following: + 1. **IDP Entity ID**: Enter the user-defined or generated Client Name from Codefresh. For example, `gujNGnhXTSmK`. + > Make sure there are no spaces before the name when copying and pasting. + 1. **SP Entity ID**: `g.codefresh.io`. + 1. **ACS URL**: Enter the Assertion URL (Callback URL) generated in Codefresh. + 1. **Login URL**: Enter the Assertion URL without the `/callback`. + 1. **IDP URL**: Add a custom name or leave the default. You will need the value to complete the SSO configuration in Codefresh. + 1. **Attributes**: Add the following: + - **email**: email + - **firstName**: firstname + - **lastName**: lastname + 1. Click **Activate** and **Continue**. +1. When you get a notification on the top right to download the Certificate, download the Certificate. +1. Continue with [Step 3: Complete SSO configuration for JumpCloud in Codefresh](#step-3-complete-sso-configuration-for-jumpcloud-in-codefresh). + +## Step 3: Complete SSO configuration for JumpCloud in Codefresh +As the final step in configuring SSO for JumpCloud, add the IDP Entry and Certificate values from JumpCloud. + +1. **IDP Entry**: The IDP URL from the SSO tab in JumpCloud. +1. **Application Certificate**: Copy and paste the content between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` from the certificate you downloaded into the field. You can also include the BEGIN and END lines. + - **Note**: You will get a warning when editing the Certificate section. +1. Click **Save**. + +You have completed SSO integration for JumpCloud via SAML in Codefresh. + +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window, use the Corporate option to log in. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/single-sign-on/saml/saml-okta.md b/_docs/single-sign-on/saml/saml-okta.md new file mode 100644 index 000000000..e404c2dd1 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-okta.md @@ -0,0 +1,84 @@ +--- +title: Okta SSO via SAML +description: Setting up Okta via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for Okta using SAML. +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Setting up SAML SSO for Okta includes: +1. Configuring SSO settings for Okta via SAML in Codefresh +1. Configuring SSO settings for Codefresh in Okta +1. Completing SSO configuration for Okta in Codefresh + +## Step 1: Configure SSO settings for Okta via SAML in Codefresh +Start by creating a SAML account for Okta in Codefresh; this generates the Assertion URL you will need when configuring Codefresh in Okta. + +1. 
In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any name you want for the integration. + * **IDP Entry**: Type in any character. You will enter the correct value from Okta in the final step. + * **Application Certificate**: You will enter the correct value from Okta in the final step. +1. Click **Add**. + The SAML integration for Okta is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the Okta SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 2: Configure SSO settings for Codefresh in Okta](#step-2-configure-sso-settings-for-codefresh-in-okta). + +## Step 2: Configure SSO settings for Codefresh in Okta +The Okta-side values you define in this step are summarized in the example at the end of this article. + +1. Navigate to **Applications**. +1. Select **Create App Integration > SAML 2.0**, and click **Next**. +1. General Settings: + - Fill in the Name and any other settings you need. + - Click **Next**. +1. Configure SAML: + - **Single Sign On URL**: + - **ACS URL**: Enter the Assertion URL (Callback URL) generated in Codefresh. + - **Audience URL**: `g.codefresh.io` + - **Name ID Format**: `EmailAddress` + - Attribute Statements: + - Leave **Name Format** as Unspecified + - **firstName**: `user.firstName` + - **lastName**: `user.lastName` + - **email**: `user.email` + - Click **Next**. +1. Feedback: + - If displayed, complete the form. + - Click **Finish**. +1. Sign On Tab: + - Select **View SAML Setup Instructions** on the right. + - Keep the page open as you will need it to complete the setup for Okta in Codefresh. +1. Continue with [Step 3: Complete SSO configuration for Okta in Codefresh](#step-3-complete-sso-configuration-for-okta-in-codefresh). + + +## Step 3: Complete SSO configuration for Okta in Codefresh +Complete SSO setup for Okta via SAML in Codefresh. + +1. **IDP Entry**: The IDP URL from the SAML Setup Instructions (Sign On tab) in Okta. +1. **Application Certificate**: Copy and paste the content between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` from the certificate you downloaded into the field. You can also include the BEGIN and END lines. + - **Note**: You will get a warning when editing the Certificate section. +1. Click **Save**. + +You have completed SSO integration for Okta via SAML in Codefresh. + +## Test SSO connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window, use the Corporate option to log in. 
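For reference, the Okta-side SAML settings from Step 2 can be summarized as follows. This is only an illustrative recap, not literal Okta configuration syntax; the key names are descriptive, and the Assertion URL placeholder stands for the value generated by Codefresh for your specific integration:

```yaml
# Okta SAML application values for the Codefresh integration (illustrative summary)
single_sign_on_url: "<Assertion URL (Callback URL) copied from Codefresh>"
audience_uri: "g.codefresh.io"
name_id_format: "EmailAddress"
attribute_statements:
  firstName: "user.firstName"
  lastName: "user.lastName"
  email: "user.email"
```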
+ +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/single-sign-on/saml/saml-onelogin.md b/_docs/single-sign-on/saml/saml-onelogin.md new file mode 100644 index 000000000..1da7b7613 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-onelogin.md @@ -0,0 +1,83 @@ +--- +title: OneLogin via SAML +description: Setting Up OneLogin via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for OneLogin using SAML in Codefresh. +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Setting up SAML SSO for OneLogin includes: +1. Adding the Codefresh application in OneLogin +1. Configuring SSO settings for OneLogin via SAML in Codefresh +1. Configuring SSO settings for Codefresh in OneLogin + +## Step 1: Add Codefresh application in OneLogin + +1. From the OneLogin toolbar, **Applications** section,and then select **Add App** on the top right. +1. Search for **SAML Custom Connector (advanced)** and select it. +1. Add a **Display Name**. Leave the other settings which are optional. +1. Click **Save**. +1. From the sidebar, select **SSO** and keep the tab open. +1. Continue with [Step 2: Configure SSO settings for OneLogin via SAML in Codefresh](#configure-sso-settings-for-onelogin-via-saml-in-codefresh). + +## Step 2: Configure SSO settings for OneLogin via SAML in Codefresh + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any arbitrary name for this integration. + * **IDP Entry**: SAML 2.0 Endpoint (HTTP) from the SSO section in OneLogin. + * **Application Certificate**: X.509 Certificate from the SSO section in OneLogin. + * Click and open **View Details**, preferably in a new tab. + * Under X.509 Certificate, click **Copy**. + * Paste the content into the Application Certificate. + * Remove the lines, `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. +1. Click **Add**. + The SAML integration for OneLogin is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the OneLogin SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 3: Configure SSO settings for Codefresh in OneLogin](#configure-sso-settings-for-codefresh-in-onelogin). + +## Step 3: Configure SSO settings for Codefresh in OneLogin + +1. Return to OneLogin, and from the sidebar, select **Configuration**. +1. 
Enter the following: + * **Audience** (EntityID): `g.codefresh.io`. + * **Recipient**: The Assertion URL you copied in the previous step. + * **ACS (Consumer) URL Validator**: The Assertion URL in Regex format. For more info on this, view OneLogin's [Setup Page](https://onelogin.service-now.com/support?id=kb_article&sys_id=c89fefdadb2310503de43e043996195a&kb_category=93e869b0db185340d5505eea4b961934){:target="\_blank"}. + * **ACS (Consumer) URL**: The Assertion URL. + * **Login URL**: `https://g.codefresh.io/login` + * **SAML Initiator**: Service Provider. + * Click **Save**. +1. In OneLogin, go to the [Users](https://cfsupport.onelogin.com/users) page, and do the following: + * Select the User. + * Go to **Applications**, and click **+**. + * Select the SAML App with the Display Name you entered in Codefresh. + * Click **Continue**. + * Make sure the **NameID** is set to the email address. + * Click **Save**. + +You have completed SSO integration for OneLogin via SAML. + + + +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) diff --git a/_docs/single-sign-on/saml/saml-pingid.md b/_docs/single-sign-on/saml/saml-pingid.md new file mode 100644 index 000000000..56d462136 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-pingid.md @@ -0,0 +1,89 @@ +--- +title: PingID SSO via SAML +description: Setting up PingID SSO via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for PingID using SAML in Codefresh. +> The configuration described here is for PingID SSO and not PingID Federate. The steps can be used as a general guide for Ping Federate. + +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Setting up SAML SSO for PingID includes: +1. Configuring SSO settings for PingID via SAML in Codefresh +1. Configuring SSO settings for Codefresh in PingID +1. Completing SSO configuration for PingID in Codefresh + + +## Step 1: Configure SSO settings for PingID via SAML in Codefresh + +Configure SSO for PingID via SAML in Codefresh. The Assertion URL is automatically generated when you add the integration. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any arbitrary name for this integration. 
+ * **IDP Entry**: Type in any character. You will enter the correct value from PingID in the final step. + * **Application Certificate**: Type in any character. You will enter the correct value from PingID in the final step. + * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts into your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. +1. Click **Add**. + The SAML integration for PingID is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the PingID SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 2: Configure SSO settings for Codefresh in PingID](#configure-sso-settings-for-codefresh-in-pingid). + + +## Step 2: Configure SSO settings for Codefresh in PingID + + +1. Log in to PingID and select the **Environment**. +1. Select **Connections > Applications**. +1. To add Codefresh as a new application, click **+**. +1. Enter the **Application Name** and **Description**. +1. Select **SAML Application** and then click **Configure**. +1. Select **Manually Enter** and define the following: + - **ACS URL**: The Assertion URL you copied from Codefresh. + - **Entity ID**: `g.codefresh.io`. +1. Click **Save**. +1. Go to the **Configuration** tab. +1. Download the X509 Certificate or Metadata. +1. Click **Attribute Mappings**, and add the following mappings: + - **email**: Email address + - **firstName**: Given name + - **lastName**: Family name + + > For PingID Federate, you must add the following mapping: NameID <- Email Address + +1. Toggle on the **Enable** option to make the application available. +1. Continue with [Step 3: Complete SSO configuration for PingID in Codefresh](#complete-sso-configuration-for-pingid-in-codefresh). + + +## Step 3: Complete SSO configuration for PingID in Codefresh +As the final step in configuring SSO for PingID, add the IDP Entry and Certificate values from PingID. + +1. **IDP Entry**: The IDP URL from the **Configuration** tab in PingID. +1. **Application Certificate**: Copy and paste the content between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` from the certificate you downloaded into the field. You can also include the BEGIN and END lines. + - **Note**: You will get a warning when editing the Certificate section. +1. Click **Save**. + +You have completed SSO integration for PingID via SAML in Codefresh. + + +## Test SSO Connection + +Now test the SSO integration with a test user, in a different browser or private/incognito browser window, to make sure it works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window, use the Corporate option to log in.
+ +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/administration/single-sign-on.md b/_docs/single-sign-on/single-sign-on.md similarity index 75% rename from _docs/administration/single-sign-on.md rename to _docs/single-sign-on/single-sign-on.md index bf75ea995..5df776caf 100644 --- a/_docs/administration/single-sign-on.md +++ b/_docs/single-sign-on/single-sign-on.md @@ -1,20 +1,20 @@ --- title: "Federated Single Sign-On (SSO) overview" description: "" -group: administration +group: single-sign-on redirect_from: - /docs/sso/federated-sso-overview/ - /docs/enterprise/single-sign-on/ toc: true --- - Customers in our **enterprise plan** can log in to Codefresh, using Federated Single Sign-On (SSO). To learn more, please [contact sales](https://codefresh.io/contact-sales/). + Customers in our **enterprise plan** can log in to Codefresh using Federated Single Sign-On (SSO). To learn more, please [contact sales](https://codefresh.io/contact-sales/){:target="\_blank"}. + + Federated identity management enables the cross organizational exchange of identity information across Internet domains, without migrating credential information or consolidating several security domains. With federation, customers can authenticate with their corporate credentials to gain access to Codefresh. - Federated identity management enables the cross organizational exchange of identity information across Internet domains, without migrating credential information or consolidating several security domains. With federation, customers can authenticate with their corporate credentials to gain access to Codefresh. This means that you can sign in to your Codefresh account using your corporate credentials. - To set up Federated SSO, your identity management organization must use either of the following: - - **A Security Assertion Markup Language 2.0 (SAML 2.0)** compliant Identity Provider (IdP), configured to communicate with Codefresh Service Provider (SP). For example, ADFS, Auth0, Okta and Ping Identity. + - **A Security Assertion Markup Language 2.0 (SAML 2.0)** compliant Identity Provider (IdP), configured to communicate with the Codefresh Service Provider (SP). For example, ADFS, Auth0, Okta and Ping Identity. - **OpenID Connect (OAuth 2.0)** identity management. For example, Google, GitHub, Bitbucket and GitLab. @@ -22,8 +22,8 @@ Asserting the identity of the user to Codefresh ensures seamless SSO from a brow A SAML2-based federated system comprises: - - **Identity Provider (IdP)**: The identity provider belongs to the corporation that manages accounts for a large number of users who need secure internet access to the services or Web-based applications of another organization. In our case, it's a customer's organization that requires access to Codefresh. - - The IdP manages the corporate users, and integrates with Identity Management systems in the customer's organization responsible for authentication. The Identity Management systems integrate with authentication providers such as LDAP or AD. + - **Identity Provider (IdP)**: The identity provider belongs to the corporation that manages accounts for a large number of users who need secure internet access to the services or web-based applications of another organization. 
In our case, it's the customer's organization that requires access to Codefresh. + - The IdP manages the corporate users, and integrates with Identity Management systems in the customer's organization responsible for authentication. The Identity Management systems integrate with authentication providers such as LDAP or AD (Active Directory). - All user authentication is carried out via Identity Management systems integrated with the IdP. - For successfully authenticated users, the IdP sends a SAML assertion to the Codefresh service provider that enables the user to access Codefresh. @@ -33,8 +33,8 @@ A SAML2-based federated system comprises: A trust must be set up between the customer IdP and Codefresh as an SP. Once the trust has been set up, and a user has been authenticated via the IdP using corporate credentials, the user can access the Codefresh platform. -{:.text-secondary} -### Why use Federated SSO + +## Why use Federated SSO Using federated SSO significantly simplifies cross-domain user management as follows: @@ -43,8 +43,6 @@ Using federated SSO significantly simplifies cross-domain user management as fol * Corporate credentials aren't exposed to the SaaS provider. -### What to read next +## What to read next [Setting Up SAML2 Federated Single Sign-On (SSO)](sso-setup-saml2) -[Setting Up OpenID Connect Federated Single Sign-On](sso-setup-oauth2) - - +[Setting Up OpenID Connect Federated Single Sign-On](sso-setup-oauth2) diff --git a/_docs/single-sign-on/team-sync.md b/_docs/single-sign-on/team-sync.md new file mode 100644 index 000000000..d5a7d6c9b --- /dev/null +++ b/_docs/single-sign-on/team-sync.md @@ -0,0 +1,166 @@ +--- +title: Common configuration for SSO providers +description: "Set up team sync, select default SSO provider" +group: single-sign-on +toc: true +--- + +Once you create an SSO provider account in Codefresh, you can: +* Automatically or manually sync between the teams created in Codefresh and your Identity Provider (IdP) +* Set a default SSO provider for your account +* Select an SSO provider for each user + + +## Syncing teams with IdPs +Team sync keeps the teams in your Codefresh account aligned with the teams defined in your IdP. + +You can sync teams: +* Automatically, in the Codefresh UI when you set up the SSO account for the IdP, through the **Auto-sync team** option. For details, see the SSO setup for your IdP. +* Manually, through the [synchronize teams command](https://codefresh-io.github.io/cli/teams/synchronize-teams/) via the [Codefresh CLI](https://codefresh-io.github.io/cli/). + +> Team-sync is supported for OIDC providers. For SAML, team-sync is supported only for Google. + + +Example: + +To sync your Azure teams, run: + + +```shell +codefresh synchronize teams <client_name> -t azure +``` +where: +`<client_name>` is the Client Name/Assertion URL/Callback URL that is automatically generated by Codefresh when you save the SSO configuration for your provider. + + +{% include image.html +lightbox="true" +file="/images/administration/sso/azure/client-name.png" +url="/images/administration/sso/azure/client-name.png" +alt="SSO Client Name" +caption="SSO Client Name" +max-width="40%" +%} + + +Though you can run this command manually, it makes more sense to run it periodically as a job, and the obvious +way to do this is with a Codefresh CI pipeline. The CLI can be used as a [freestyle step]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/).
+ +You can create a git repository with a [codefresh.yml]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) file with the following content: + +```yaml +version: '1.0' +steps: + syncMyTeams: + title: syncTeams + image: codefresh/cli + commands: + - 'codefresh synchronize teams my-client-name -t azure' +``` + +To fully automate this pipeline you should set a [cron trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/cron-triggers/) for it. The cron-trigger will run this pipeline (and therefore synchronize the teams) in a fully automated manner. + +This way you can synchronize your teams every day/week/hour depending on you Cron trigger setup. + +### CLI synchronize teams command + +If the `Restrict inviting additional users by email address domain` is enabled for your account, running the `synchronize teams` command via the CLI, does not invite new users to Codefresh. The output of the command will look similar to the following: + +```json +[ + { + "action": "update", + "teams": [ + { + "team": "developers", + "members": [ + { + "members": [], + "action": "create" + } + ] + }, + { + "team": "DevOps", + "members": [ + { + "members": [], + "action": "create" + } + ] + } + ] + } +] +``` + +**Turn off the domain restriction**: + +1. Navigate to **Account Settings > User & Teams > Security** +1. Toggle off **Restrict inviting additional users by email address domain**. +1. Click **Save**. +1. Rerun the sync command. + +### Sync GitHub Organization Teams to Codefresh + +As an admin, you may want to sync your GitHub Organization Teams with your Codefresh account. At the same time, you do not want to set up an SSO provider and have the users use any login provider they choose. + +The Personal Access Token (PAT) from a user will sync ALL Organizations and ALL Teams to which the user has access. It is recommended to use a "machine" account to access the one organization you need. + +1. Create a PAT that has access to read organizations and teams +1. Install and configure the Codefresh CLI + + `codefresh synchronize teams github -t github --tk $GHTOKEN` + +1. The sync will invite all users except for those that have private email settings turned on. + +Once the initial sync happens, you can set up a cron trigger pipeline to run the command on a schedule. + +## Set a default SSO provider for account + +If you have multiple SSO providers, you can set one of them as the default provider for your account. +Setting a default provider assigns the selected SSO automatically to all new users. The link in the email invitation takes them directly to the login page of that SSO provider. + +1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on). +1. From the list, select the SSO account to set as default and click the **Edit** icon on the right. +1. Scroll down and select **Set as default**. + + + +## Select SSO method for individual users + +In addition to setting a default provider for your account, you can select a different provider for each user if so required. +* New users + If you have an SSO provider selected as the default, that provider is automatically assigned to new users, added either manually or via team synchronization. + +* Existing users + SSO login is not configured by default for existing users. You must _explicitly select_ the SSO provider for existing users. 
+ If SSO login is already configured for an existing user, and you add a new identity provider, to change the SSO login to the new provider, you must _select_ the new provider for the user. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}. +1. Select the SSO provider from the SSO list. + +{% include image.html +lightbox="true" +file="/images/administration/sso/select-user-sso.png" +url="/images/administration/sso/select-user-sso.png" +alt="Selecting SSO method" +caption="Selecting SSO method" +max-width="50%" +%} + +## Related articles +[Setting up OIDC Federated SSO]({{site.baseurl}}/docs/single-sign-on/oidc) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) + + + diff --git a/_docs/terms-and-privacy-policy/privacy-policy.md b/_docs/terms-and-privacy-policy/privacy-policy.md index b61dfc261..d3be33495 100644 --- a/_docs/terms-and-privacy-policy/privacy-policy.md +++ b/_docs/terms-and-privacy-policy/privacy-policy.md @@ -2,6 +2,8 @@ title: "Privacy Policy" description: "" group: terms-and-privacy-policy +redirect_from: + - /docs/privacy-policy/ toc: true --- **CODEFRESH PRIVACY POLICY ("PRIVACY POLICY")** @@ -53,4 +55,3 @@ You may exercise the above rights by sending a request to [privacy@codefresh.io] **12. Questions**.If you have any questions about this Privacy Policy or concerns about the way we process your PII, please contact us at [privacy@codefresh.io](mailto:privacy@codefresh.io). If you wish to delete all information regarding your use of the Solution, please contact us at: [ ](mailto:support@codefresh.io)[privacy @codefresh.io](mailto:support@codefresh.io). Last Date Updated: June 3, 2019 - diff --git a/_docs/terms-and-privacy-policy/sla.md b/_docs/terms-and-privacy-policy/sla.md index 9661c5ea0..5edb1f260 100644 --- a/_docs/terms-and-privacy-policy/sla.md +++ b/_docs/terms-and-privacy-policy/sla.md @@ -5,12 +5,7 @@ group: terms-and-privacy-policy toc: true --- -This Service Level Agreement (“**SLA**”) sets forth the terms and conditions under which Codefresh will provide service levels to Licensee and Users pursuant to the applicable Terms of Service in effect between Codefresh and Licensee and/or Users (the “**Terms**”). - -{::nomarkdown} -SLA Response Times -

                -{:/} +This Service Level Agreement (“**SLA**”) sets forth the terms and conditions under which Codefresh will provide service levels to Licensee and Users pursuant to the applicable Terms of Service in effect between Codefresh and Licensee and/or Users (the “**Terms**”). **1. DEFINITIONS**. The following definitions will apply to this SLA. All capitalized terms not defined in this SLA will have the meaning given them in the Terms. @@ -53,10 +48,7 @@ Percentage”). Issues with the Codefresh Platform are classified based on severity of the issues and the required resources needed to resolve them. Codefresh will designate the severity and impact of the reported issue as defined below and will use commercially reasonable efforts to respond and commence working on the applicable issue in accordance with the time periods below. -{::nomarkdown} -3.1. SLA Response Times. -

                -{:/} +**3.1. SLA Response Times**. | Priority | Silver* | Gold* | Platinum** | | -------------- | ---------------------------- | -----------------| -----------------| diff --git a/_docs/testing/automatic-preview-environments.md b/_docs/testing/automatic-preview-environments.md new file mode 100644 index 000000000..163c0fb23 --- /dev/null +++ b/_docs/testing/automatic-preview-environments.md @@ -0,0 +1,65 @@ +--- +title: "Dynamic preview environments" +description: "Preview test environments" +group: testing +toc: true +--- +If your service is one of many microservices, after running automated tests on your service, you would probably want to check the new service with your whole system. In this case, you can launch the composition of your system as part of your build, and at the end of the build, open the composition. + +## Prerequisites + +Complete the tutorials for: +* [Creating a basic pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/) +* [Creating temporary environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/) + +## Launch the composition + +{:start="1"} +1. Open your `codefresh.yml` file and add a new step: +```yaml +launch_composition_step: + title: "Launch full composition with latest images" + type: launch-composition + composition: your-composition-name + fail_fast: false +``` +1. Commit and push the changes to Git repository. +1. Build your service with Codefresh. +1. In the Codefresh UI, from the Artifacts section on the sidebar, select **Compositions**, and then select the **Running Compositions** tab. + The new preview environment is displayed in the list of Running Compositions. + +## Launch an environment on single branch + +There is a limit to the number of environments you can run concurrently. That's why it's a good practice to launch the composition only on a certain condition. Usually the most relevant condition is the branch, since you probably want your environment to be updated on the main branch. + +The following instructions describe how to launch the environment for only the `master` branch: + +{:start="1"} +1. Open your `codefresh.yml` file and add to the `launch_composition_step` the following: +```yaml +when: + branch: + only: + - master +``` +1. Commit and push changes to your Git repository's `master` branch. +1. Build your service with Codefresh on branch `master`. +1. Create a new branch and push it to your Git repository under a new branch. +1. Build your service with Codefresh on the new branch. +1. When the build completes execution, open its log. + You should see something similar to the example below. 
+ +{% include image.html +lightbox="true" +file="/images/testing/dynamic-preview-environment.png" +url="/images/testing/dynamic-preview-environment.png" +alt="Launch environment on single branch" +caption="Launch environment on single branch" +max-width="70%" +%} + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating compositions]({{site.baseurl}}/docs/testing/create-composition/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Service containers]({{site.baseurl}}/docs/pipelines/service-containers/) diff --git a/_docs/testing/composition-service-discovery.md b/_docs/testing/composition-service-discovery.md new file mode 100644 index 000000000..aa2cc537f --- /dev/null +++ b/_docs/testing/composition-service-discovery.md @@ -0,0 +1,98 @@ +--- +title: "Composition Service Discovery (Experimental)" +description: "" +group: testing +redirect_from: + - /docs/integration-tests/ + - /docs/on-demand-test-environment/composition-service-discovery/ +toc: true +--- +Codefresh enables you to launch multiple instances of the same composition utilizing Docker. +This is possible, in part, because Docker can map ports from a container to a random port on a host. Following from this are some basic questions: + * If your container is mapped to a random port, how can you predict the URL of the application? + * How can you configure your web application's container with a "Base URL"? + * How can you reference other services without linking to them? + +{{site.data.callout.callout_info}} +##### Availability + +This feature is available for users with a Pro subscription.
+Already a Pro subscriber? Contact us to opt into this feature. +{{site.data.callout.end}} + +Codefresh utilizes `Dynamic Composition Service Routing` to deliver seamless service discovery within a composition. + +After you enable this feature for your account, Codefresh injects the URLs of every service in your composition as environment variables. + +## What do these environment variables look like? + +Every service URL environment variable is prefixed with `CF_URL_` and is uniquely identifiable using the service name: +`CF_URL_SERVICENAME=http://foo.cf-cd.com/unique-url`. + +If your service exposes multiple ports, an environment variable is injected for every port, composed of the service name and the port: +`CF_URL_SERVICENAME_PORTNUMBER=http://foo.cf-cd.com/unique-url-1`. + +In addition, every service gets its own domain that routes directly to that service. The domains are uniquely identifiable through environment variables: +`CF_DOMAIN_SERVICENAME=unique-prefix.foo.cf-cd.com`. + +For services that expose multiple ports, the domain environment variable has the port number as a suffix: +`CF_DOMAIN_SERVICENAME_PORTNUMBER=unique-prefix-1.foo.cf-cd.com`. + +{{site.data.callout.callout_warning}} +##### Note: + +The unique identifier is the service's name, not the name of the container that was produced for the service. +{{site.data.callout.end}} + +## Example + +Consider the following composition: + + `Composition.yml` +{% highlight yaml %} +version: '3' +services: + db: + image: postgres + api: + image: myorg/api + ports: + - 9000 + links: + - db + web: + image: myorg/web + ports: + - 80 + - 8080 +{% endhighlight %} + +The `db` service does not expose any ports. The `api` service exposes port `9000`, and the `web` service exposes port `80` and port `8080`. + +So, every container produced by this composition will be injected with: + +``` +CF_URL_API=http://foo.cf-cd.com/someurl +CF_URL_WEB_80=http://foo.cf-cd.com/someurl2 +CF_URL_WEB_8080=http://foo.cf-cd.com/someurl3 + +CF_DOMAIN_API=some-name.foo.cf-cd.com +CF_DOMAIN_WEB_80=some-name2.foo.cf-cd.com +CF_DOMAIN_WEB_8080=some-name3.foo.cf-cd.com +``` + +This means you can also discover these URLs within your application. +For example, within a Node.js application you can run the following routine: + + `Find all service URLs` +{% highlight javascript %} +const allVars = process.env; +const allUrls = Object.keys(allVars) + .filter(envVarKey => envVarKey.startsWith('CF_URL_')) + .reduce((obj, envVarKey) => { + obj[envVarKey] = allVars[envVarKey]; + return obj; + }, {}); +{% endhighlight %} + +The `allUrls` object then contains all the injected URL environment variables. diff --git a/_docs/testing/create-composition.md b/_docs/testing/create-composition.md new file mode 100644 index 000000000..cecbe179b --- /dev/null +++ b/_docs/testing/create-composition.md @@ -0,0 +1,270 @@ +--- +title: "Creating compositions" +description: "Create environment configurations in Codefresh" +group: testing +toc: true +--- + +Compositions can be launched as part of a unit test step, an integration test step, or to run an image for manual testing. You can create compositions from scratch or import an existing `docker-compose.yml` file. + +## Create composition +1. In the Codefresh UI, from the Artifacts section in the sidebar, click [**Compositions**](https://g.codefresh.io/compositions){:target="\_blank"}. +1. Click **Create Composition**.
+ +{% include +image.html +lightbox="true" +file="/images/testing/compositions/add-composition-first.png" +url="/images/testing/compositions/add-composition-first.png" +alt="Add composition" +captiom="Add composition" +max-width="70%" +%} + +{:start="3"} +1.In the **Composition Name** text box, type a name for your composition, and click **Next**. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-name.png" +url="/images/testing/compositions/composition-name.png" +alt="Composition name" +caption="Composition name" +max-width="70%" +%} + +{:start="4"} +1. Select the type of composition to create: + * **From file in repo**: Start a new composition from a Docker Compose file in your repository. + * **From template**: Use a template as a starting point for the composition, if your repository doesn't include a `docker-compose.yml` file. + * **Empty composition**: Create a composition from scratch. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-method.png" +url="/images/testing/compositions/composition-method.png" +alt="Composition starting point" +caption="Composition starting point" +max-width="70%" +%} + +{:start="5"} +1. Click **Next**, and continue with one of the following: + * [From file in repo**](#from-file-in-repo) + * [From template](#from-template) + * [Empty composition](#empty-composition) (Advanced) + + +### From file in repo +Start a new composition from a Docker Compose file in your repository. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-file-in-repo.png" +url="/images/testing/compositions/composition-file-in-repo.png" +alt="Add composition from file in repo" +caption="Add composition from file in repo" +max-width="70%" +%} + +1. To search for the repo, in the search box, type the name of the repository. + OR + Click **Add by URL**, and then enter the URL of the repo. +1. From the **Branch** dropdown, select a branch for the first build. +1. Click **Next**. +1. Enter the path to `docker-compose.yml`. + By default, Codefresh searches for your `docker-compose.yml` at the root level of your repository, for the name _`docker-compose.yml`_. If your `docker-compose.yml` is in a subdirectory, provide the path as well, for example, `./foo/bar/docker-compose.yml`. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/path-to-docker-compose.png" +url="/images/testing/compositions/path-to-docker-compose.png" +alt="Path to Compose file" +caption="Path to Compose file" +max-width="70%" +%} + +{:start="5"} +1. Click **Next**. + >We don’t support the `build` property of Docker Compose. We will replace it with images automatically using a pipeline that is automatically created. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/replace-build.png" +url="/images/testing/compositions/replace-build.png" +alt="Replacing build with image" +caption="Replacing build with image" +max-width="70%" +%} + +{:start="6"} +1. Click **Create**. + Once ready, your composition is displayed in the Compositions list. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-list.png" +url="/images/testing/compositions/composition-list.png" +alt="Composition list" +caption="Composition list" +max-width="70%" +%} + + + +### From template +If your repository doesn't include a `docker-compose.yml` file, use one of our templates to see how it works. + + +1. Choose the template. +1. Click **Create**. 
+ +{% include +image.html +lightbox="true" +file="/images/testing/compositions/compose-from-template-select-template.png" +url="/images/testing/compositions/compose-from-template-select-template.png" +alt=" Select composition template" +caption=" Select composition template" +max-width="70%" +%} + + You will see the Composition editor, where you can tweak the template as needed, and then launch the composition to see results. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/compose-from-template-edit.png" +url="/images/testing/compositions/compose-from-template-edit.png" +alt="Edit selected template" +caption="Edit selected template" +max-width="70%" +%} + +> To launch this composition, click the rocket icon. + + +### Empty composition +Create a composition from scratch. + + +1. To add a service, click the **Add Service** button. + You can add existing services, or provide the name for the Docker image to be pulled from the Docker registry. + + {% include +image.html +lightbox="true" +file="/images/testing/compositions/empty-composition.png" +url="/images/testing/compositions/empty-composition.png" +alt="Empty composition" +caption="Empty composition" +max-width="70%" +%} + +{:start="2"} +1. (Optional) Click **Edit**, and modify the content based on [Docker Compose YAML ](https://docs.docker.com/compose/compose-file/){:target="_blank"}. +1. Click **Save** on the upper-right corner. + + + +## Working with existing compositions + +You can edit any composition you created, regardless of the method used to create it, by selecting it from the Compositions list. +Edit as needed in the YAML editor. + + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/existing-composition.png" +url="/images/testing/compositions/existing-composition.png" +alt="Edit existing composition" +caption="Edit existing composition" +max-width="70%" +%} + +## Manually launching compositions + +When you are ready with the composition, launch it to inspect your application. Launching a composition creates a temporary +test environment in your Codefresh account that you can use to inspect your application. + +1. In the Codefresh UI, from the Artifacts section in the sidebar, click [**Compositions**](https://g.codefresh.io/compositions){:target="\_blank"}. +1. From the list, select the composition to launch. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-list.png" +url="/images/testing/compositions/composition-list.png" +alt="Composition list" +caption="Composition list" +max-width="70%" +%} + +{:start="3"} +1. Click the **Launch** icon. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-launch-button.png" +url="/images/testing/compositions/composition-launch-button.png" +alt="Launch composition" +caption="Launch composition" +max-width="70%" +%} + +{:start="4"} +1. To verify that the launch completed successfully, review the log. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/composition-launch-log.png" +url="/images/testing/compositions/composition-launch-log.png" +alt="Composition log" +caption="Composition log" +max-width="70%" +%} + +## Sharing the environment URL + +View a record for the running environment and all containers for the environment in the Running Compositions tab. + +1. In the Codefresh UI, from the Artifacts section in the sidebar, click [**Compositions**](https://g.codefresh.io/compositions){:target="\_blank"}. +1. 
Click **Running Compositions**. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/environment-running.png" +url="/images/testing/compositions/environment-running.png" +alt="environment-running.png" +max-width="70%" +caption="Active test environments" +alt="Active test environments" +%} + +{:start="3"} +1. To share your environment with your team, click the **Hashtag** icon. + +{% include +image.html +lightbox="true" +file="/images/testing/compositions/share-environment-link.png" +url="/images/testing/compositions/share-environment-link.png" +alt="Link for sharing environment" +caption="Link for sharing environment" +max-width="70%" +%} + +## Related articles +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Creating test reports]({{site.baseurl}}/docs/testing/test-reports/) diff --git a/_docs/testing/integration-tests.md b/_docs/testing/integration-tests.md new file mode 100644 index 000000000..c4a8bb0f7 --- /dev/null +++ b/_docs/testing/integration-tests.md @@ -0,0 +1,448 @@ +--- +title: "Integration tests" +description: "Launch additional services in Codefresh pipelines" +group: testing +redirect_from: + - /docs/integration-tests/ + - /docs/integration-test-script/ + - /docs/testing/run-unit-test-with-db-composition/ + - /docs/run-unit-test-with-db-composition/ +toc: true +--- + +Simple [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) that rely only on the source code of the application, are very easy to execute in Codefresh, using only [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/). For integration tests however, you usually need to launch either the application itself, or one or more external services, such as a database. + +Codefresh offers two ways of launching sidecar containers (similar to `docker compose`) within the pipeline: + +1. [Compositions]({{site.baseurl}}/docs/pipelines/steps/composition/) is the old (but still supported) way +1. [Service containers]({{site.baseurl}}/docs/pipelines/service-containers/) is the new and more flexible way + +For brand-new pipelines, we suggest you use service containers. +They are much more flexible than compositions in these areas: +1. Guaranteeing the order of service launch and their dependencies (a feature that is not even offered by vanilla `docker compose`). +1. Using a special Docker image to preload data to a database, or otherwise initialize a service before running tests. +1. Attaching service containers to the whole pipeline instead of individual steps . +1. Auto-mounted Codefresh [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) for freestyle steps (unlike compositions), making file access very easy. You can execute your tests from the Git repository that was cloned. + +>This article explains how to run additional services that are automatically discarded once the pipeline has completed its run. If you are interested in temporary test environments, see the [preview environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/). + +## How integration tests work in Codefresh + +* Service containers and `docker compose` + Service containers work similar to `docker compose`. A set of containers are launched on the same network with configurable hostnames and ports. Once they are up, you decide what to do, with a freestyle step that is part of the network as well. 
In the most typical pipeline, you can use your existing testing framework, regardless of the programming language, in the same manner as you would run your tests locally. + +* No hard-coded hostnames + A best practice is to make sure that the hostnames used by your integration tests to access external services are not hard-coded. Even though with Codefresh you can decide on the hostnames used in the pipeline, that is, the hostname of a MySQL or Redis instance, in the long run, it is better if you can choose that information freely without being limited to and by what is specified in the source code. + +* No `localhost` for an API endpoint + Make sure that your tests do **NOT** use `localhost` for an API endpoint. This technique does not work with `docker compose`, and will not work with Codefresh either. + Instead, use the hostname defined in the `docker-compose/codefresh.yml` file. For example, if you launch a MySQL service at hostname `my_db`, then your tests should use `my_db:3306` as a target. + Even better, make the hostname completely configurable with an environment variable so that you can change it within the Codefresh pipeline at will. + Basically, make sure that your integration tests work fine with `docker compose` locally on your workstation, before converting them to a Codefresh pipeline. + +>The services you launch in a Codefresh pipeline consume resources (memory/CPU) from the pipeline's runtime environment. The more services you launch, the less resources you have for the actual pipeline. We also suggest that you do **NOT** use service containers for load or performance testing. + +## Running integration tests directly from source code + +The simplest way to run integration tests is to check out the source code of your tests and launch the services that they need. + +{% + include image.html + lightbox="true" + file="/images/testing/integration-testing/from-source-code.png" + url="/images/testing/integration-testing/from-source-code.png" + alt="Testing directly from source code" + caption="Testing directly from source code" + max-width="70%" +%} + +This is a very popular way of running integration tests but not the most flexible one. It works only when your tests have very simple requirements on their testing environment. It also doesn't make a clear distinction on source code that gets shipped to production with source code that is used only for testing. Make sure that you don't fall into the common trap of shipping testing tools with your Docker container in production. + +Here is the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-npm-example" + revision: "master" + git: github + my_deps: + image: 'node:11' + title: "Download Deps" + commands: + - 'npm install' + my_tests: + image: 'node:11' + title: "Integration tests" + commands: + - 'npm test' + services: + composition: + my_redis: + image: 'redis:latest' + ports: + - 6379 +{% endraw %} +{% endhighlight %} + +We suggest using this technique only if your application is not Dockerized yet, that is, you don't deploy it with a Docker image to production. + +See more examples with [MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/), or [Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/). 
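+
+Following the advice above about not hard-coding hostnames, you can pass the service hostname and port to your tests through the pipeline itself rather than baking them into the test code. The step below is a sketch that varies the Redis example above; the `REDIS_HOST` and `REDIS_PORT` variable names are only illustrative, and your tests must be written to read them:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+  my_tests:
+    image: 'node:11'
+    title: "Integration tests"
+    environment:
+      # illustrative variable names; the tests read these instead of hard-coding my_redis:6379
+      - REDIS_HOST=my_redis
+      - REDIS_PORT=6379
+    commands:
+      - 'npm test'
+    services:
+      composition:
+        my_redis:
+          image: 'redis:latest'
+          ports:
+            - 6379
+{% endraw %}
+{% endhighlight %}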
+ +## Running tests after launching the application + +A better approach, that mimics what happens in reality, is to launch your application as a Docker image, and then run tests against it. This approach is only possible if you have adopted containers as deployment artifacts: + +{% + include image.html + lightbox="true" + file="/images/testing/integration-testing/to-app.png" + url="/images/testing/integration-testing/to-app.png" + alt="Launching the application to be tested" + caption="Launching the application to be tested" + max-width="70%" +%} + +This technique is only limited by your pipeline resources. +If you have not adopted microservices, it might be difficult to launch a huge monolith as part of a Codefresh pipeline (remember that service containers use the same resources as the pipeline). +But for simple applications, this method ensures that your tests actually hit the running application. + +Here is the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-java-example" + revision: "master" + git: github + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "my-java-app" + tag: "master" + dockerfile: "Dockerfile" + my_tests: + image: 'maven:3.5.2-jdk-8-alpine' + title: "Integration tests" + commands: + - 'mvn -Dmaven.repo.local=/codefresh/volume/m2_repository integration-test' + services: + composition: + my_postgres: + image: 'postgres:11.5' + ports: + - 5432 + app: + image: '${{build_app_image}}' + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +See more examples in [launching the application]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/), or [Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/). + +## Using a custom test image + +In all the previous examples, the integration tests ran in a public Dockerhub image that has the programming language/framework that your tests require. In more complex cases, you might need to create your own Docker image that contains exactly the tools that you wish. + +In this case, you can create a special Docker image that will be used just for testing and nothing else. + +{% + include image.html + lightbox="true" + file="/images/testing/integration-testing/special-image.png" + url="/images/testing/integration-testing/special-image.png" + alt="Using a dedicated testing image" + caption="Using a dedicated testing image" + max-width="70%" +%} + +It is very easy to create a test image as part of a pipeline and then reference it for integration tests. +Here is the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." 
+ repo: "kostis-codefresh/my-app-example" + revision: "master" + git: github + build_app_image: + title: "Building App Docker Image" + type: "build" + image_name: "my-web-app" + tag: "master" + dockerfile: "Dockerfile" + build_test_image: + title: "Building Test Docker Image" + type: "build" + image_name: "my-test-image" + tag: "master" + dockerfile: "Dockerfile.testing" + my_tests: + image: '${{build_test_image}}' + title: "Integration tests" + commands: + - 'sh ./my-tests.sh' + services: + composition: + my_postgres: + image: 'postgres:11.5' + ports: + - 5432 + app: + image: '${{build_app_image}}' + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +This is the recommended way to run integration tests in Codefresh. It creates a clear distinction between the source code that gets shipped to production and the source code that is needed only for tests. It also allows you to define what the test environment will look like (maybe you need multiple or exotic testing tools that are not available in Docker Hub). + +See more examples using a separate testing image [for the application]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) or [a MySQL instance]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/). + +## Integration tests for microservices + +If you have enough pipeline resources, you can keep adding service containers that form a complex running environment. Service containers support [launch dependency order]({{site.baseurl}}/docs/pipelines/service-containers/#checking-readiness-of-a-service) as well as [post-launch phases]({{site.baseurl}}/docs/pipelines/service-containers/#preloading-data-to-databases), making them feasible for any complex infrastructure you have in mind. + +{% + include image.html + lightbox="true" + file="/images/testing/integration-testing/complex-tests.png" + url="/images/testing/integration-testing/complex-tests.png" + alt="Microservice testing" + caption="Microservice testing" + max-width="70%" +%} + +Here is the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-app-example" + revision: "master" + git: github + build_frontend_image: + title: "Building Frontend Docker Image" + type: "build" + image_name: "my-web-app" + working_directory: './frontend' + tag: "master" + dockerfile: "Dockerfile" + build_backend_image: + title: "Building Backend Docker Image" + type: "build" + image_name: "my-backend-app" + working_directory: './backend' + tag: "master" + dockerfile: "Dockerfile" + build_test_image: + title: "Building Test Docker Image" + type: "build" + image_name: "my-test-image" + tag: "master" + dockerfile: "Dockerfile.testing" + my_tests: + image: '${{build_test_image}}' + title: "Integration tests" + commands: + - 'sh ./my-tests.sh' + services: + composition: + my_postgres: + image: 'postgres:11.5' + ports: + - 5432 + redis_ds: + image: 'redis:latest' + ports: + - 6379 + backend: + image: '${{build_backend_image}}' + ports: + - 9000 + frontend: + image: '${{build_frontend_image}}' + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +Keep in mind that extra services use memory from the pipeline itself, so if you follow this route, make sure that the pipeline runs in the appropriate runtime environment. + +## Running service containers for the whole pipeline + +In most cases service containers should be only attached to the pipeline step that uses them. 
+ +{% + include image.html + lightbox="true" + file="/images/testing/integration-testing/single-scope.png" + url="/images/testing/integration-testing/single-scope.png" + alt="Service containers for individual steps" + caption="Service containers for individual steps" + max-width="60%" +%} + +Doing so not only helps with pipeline resources (as service containers are discarded when they are not needed), but also allows you to mix and match different service containers for different steps. + +Here is an example pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-app-example" + revision: "master" + git: github + build_backend_image: + title: "Building Backend Docker Image" + type: "build" + image_name: "my-backend-app" + working_directory: './backend' + tag: "master" + dockerfile: "Dockerfile" + backend_tests: + image: 'maven:3.5.2-jdk-8-alpine' + title: "Running Backend tests" + commands: + - 'mvn -Dmaven.repo.local=/codefresh/volume/m2_repository integration-test' + services: + composition: + backend: + image: '${{build_backend_image}}' + ports: + - 9000 + build_frontend_image: + title: "Building Frontend Docker Image" + type: "build" + image_name: "my-web-app" + working_directory: './frontend' + tag: "master" + dockerfile: "Dockerfile" + my_tests: + image: 'node:11' + title: "Running front-end tests" + commands: + - 'npm test' + services: + composition: + frontend: + image: '${{build_frontend_image}}' + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +In some cases however, you would like to execute multiple steps with integration tests that share the same environment. In this case +you can also launch service containers at the beginning of the pipeline to make them available to all pipeline steps: + +{% + include image.html + lightbox="true" + file="/images/testing/integration-testing/multi-scope.png" + url="/images/testing/integration-testing/multi-scope.png" + alt="Service containers for the whole pipeline" + caption="Service containers for the whole pipeline" + max-width="60%" +%} + +You can use this technique by putting the service container definition [at the root of the pipeline]({{site.baseurl}}/docs/pipelines/service-containers/#running-services-for-the-duration-of-the-pipeline) instead of within specific step. + +Here is an example that follows this technique: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +services: + name: my_database + composition: + my-redis-ds: + image: redis:latest + ports: + - 6379 + my_postgres: + image: 'postgres:11.5' + ports: + - 5432 +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-app-example" + revision: "master" + git: github + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "my-web-app" + tag: "master" + dockerfile: "Dockerfile" + my_api_tests: + image: '${{build_app_image}}' + title: "Running API tests" + commands: + - 'npm run test' + my_fuzzy_tests: + image: 'node:11' + title: "Fuzzy testing" + commands: + - 'npm run fuzzy-tests' + my_e2e_tests: + image: cypress/base + title: "Running E2E tests" + commands: + - 'cypress run' +{% endraw %} +{% endhighlight %} + +The Redis and PostgreSQL instances are now available to all pipeline steps. Read all about test results and graphs in [test reports]({{site.baseurl}}/docs/testing/test-reports/). 
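+
+One more note on the pipeline-level services shown above: if a step can start before PostgreSQL is ready to accept connections, you can guard the composition with a readiness check (see the readiness link earlier in this article). A minimal sketch under that assumption; the probe command and timing values are placeholders to adapt:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+services:
+  name: my_database
+  composition:
+    my_postgres:
+      image: 'postgres:11.5'
+      ports:
+        - 5432
+  readiness:
+    timeoutSeconds: 30
+    periodSeconds: 15
+    image: 'postgres:11.5'
+    commands:
+      # pg_isready ships with the postgres image and exits 0 once the server accepts connections
+      - 'pg_isready -h my_postgres'
+steps:
+  my_sanity_check:
+    image: 'postgres:11.5'
+    title: "Confirm the database is reachable"
+    commands:
+      - 'pg_isready -h my_postgres'
+{% endraw %}
+{% endhighlight %}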
+ + +## Creating test reports + +All the techniques shown above are also applicable to test reports. + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/sample-test-report.png" +url="/images/pipeline/test-reports/sample-test-report.png" +alt="Sample Allure test report" +caption="Sample Allure test report" +max-width="70%" +%} + + +## Related articles +[Service containers]({{site.baseurl}}/docs/testing/unit-tests/) +[Run integration tests with MongoDB]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +[Run integration tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Run integration tests with PostgreSQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Run integration tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +[Run unit tests]({{site.baseurl}}/docs/example-catalog/ci-examples/run-unit-tests) diff --git a/_docs/testing/security-scanning.md b/_docs/testing/security-scanning.md new file mode 100644 index 000000000..469a44215 --- /dev/null +++ b/_docs/testing/security-scanning.md @@ -0,0 +1,199 @@ +--- +title: "Security scanning" +description: "Scan for vulnerabilities with Codefresh pipelines" +group: testing +toc: true +--- + +Codefresh can integrate with any security scanning platform that scans source code or Docker images for vulnerabilities. + +The integration can happen via a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) as long as the scanning solution offers any of the following: + +* A Docker image with the scanner +* A CLI that can be packaged in a Docker image +* An API + +Since all security solutions offer an API, Codefresh can essentially use any scanning solution via that interface. + + +## Existing security integrations + +Codefresh already offers Docker images for the following security platforms: + +* [Anchore](https://codefresh.io/steps/step/anchore){:target="\_blank"} +* [Aqua Security](https://codefresh.io/steps/step/aqua){:target="\_blank"} +* [Clair](https://codefresh.io/steps/step/paclair){:target="\_blank"} +* [Twistlock](https://codefresh.io/steps/step/twistlock){:target="\_blank"} +* [WhiteSource](https://codefresh.io/steps/step/whitesource){:target="\_blank"} + +You can find more integrations as they are added in the [plugin directory](https://codefresh.io/steps/){:target="\_blank"}. + + +## Security scanning strategies + +Because you can insert a scanning step anywhere in your pipeline, you have great flexibility on when to start a security scan. +Common strategies are: +1. Scanning the source code before being packaged in a container +1. Scanning a container before being stored in a registry +1. Scanning a container before being deployed to production +1. A combination of the above + + +Here is an example pipeline that scans a Docker image: +* With [Aqua](https://www.aquasec.com/){:target="\_blank"} after being pushed to the [default Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/#the-default-registry) +* Before it is promoted to the [external Azure Registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/azure-docker-registry/){:target="\_blank"}. 
+ +{% include image.html +lightbox="true" +file="/images/testing/security-scanning/aqua-scan.png" +url="/images/testing/security-scanning/aqua-scan.png" +alt="Scanning a Helm release with Aqua" +caption="Scanning a Helm release with Aqua" +max-width="100%" +%} + + +Here's the full pipeline definition: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} + +version: '1.0' +stages: + - prepare + - build + - test + - push + - deploy +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: prepare + build: + title: "Building Docker Image" + type: "build" + image_name: "${{CF_ACCOUNT}}/${{CF_REPO_NAME}}" + tag: ${{CF_REVISION}} + dockerfile: "Dockerfile" + stage: "build" + AquaSecurityScan: + title: 'Aqua Private scan' + image: codefresh/cfstep-aqua + stage: test + environment: + - 'AQUA_HOST=${{AQUA_HOST}}' + - 'AQUA_PASSWORD=${{AQUA_PASSWORD}}' + - 'AQUA_USERNAME=${{AQUA_USERNAME}}' + - IMAGE=${{CF_ACCOUNT}}/${{CF_REPO_NAME}} + - TAG=${{CF_REVISION}} + - REGISTRY=codefresh + push: + title: "Pushing image to Azure registry" + type: "push" + stage: push + image_name: "${{CF_ACCOUNT}}/${{CF_REPO_NAME}}" + registry: "myazureregistry" + candidate: "${{build}}" + tags: + - "${{CF_BRANCH_TAG_NORMALIZED}}" + - "${{CF_REVISION}}" + - "${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}" + + createpullsecret: + title: "Allowing cluster to pull Docker images" + image: codefresh/cli + stage: "deploy" + commands: + - codefresh generate image-pull-secret --cluster 'mydemok8scluster' --registry myazureregistry + deploy: + image: codefresh/cfstep-helm:2.12.0 + stage: deploy + environment: + - CHART_REF=deploy/helm/colors + - RELEASE_NAME=color-coded + - KUBE_CONTEXT=mydemok8scluster + - CUSTOM_service.type=LoadBalancer + - CUSTOM_deployment[0].track=release + - CUSTOM_deployment[0].image.repository=registry3435454.azurecr.io/${{CF_REPO_OWNER}}/${{CF_REPO_NAME}} + - CUSTOM_deployment[0].image.tag="${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}" + - CUSTOM_deployment[0].image.version="${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}" + - CUSTOM_deployment[0].image.pullSecret=codefresh-generated-registry3435454.azurecr.io-myazureregistry-default +{% endraw %} +{% endhighlight %} + +The security scanning step is inserted after building the Docker image, but before promoting the image to the Azure Docker registry. + + +## Viewing security reports + +The easiest way to view security reports is to visit the portal/dashboard of the security platform that you are using. + +{% include image.html +lightbox="true" +file="/images/testing/security-scanning/snyk-test-report.png" +url="/images/testing/security-scanning/snyk-test-report.png" +alt="Snyk Security Analysis" +caption="Snyk Security Analysis" +max-width="60%" +%} + +You can also attach Analysis Reports to Codefresh builds using the [test reporting feature]({{site.baseurl}}/docs/testing/test-reports/). 
+ +{% highlight yaml %} +{% raw %} + ArchiveClairReport: + title: Upload Clair Report + image: codefresh/cf-docker-test-reporting + environment: + - REPORT_DIR=reports + - REPORT_INDEX_FILE=clair-scan.html +{% endraw %} +{% endhighlight %} + +In this example, we attach the Clair Scan report to the build created: + +{% include image.html +lightbox="true" +file="/images/testing/security-scanning/security-test-results.png" +url="/images/testing/security-scanning/security-test-results.png" +alt="Attaching scanning results to a build" +caption="Attaching scanning results to a build" +max-width="100%" +%} + +To view the full report, click **Test Report**: + +{% include image.html +lightbox="true" +file="/images/testing/security-scanning/clair-scan.png" +url="/images/testing/security-scanning/clair-scan.png" +alt="Clair security scan" +caption="Clair security scan" +max-width="60%" +%} + + +## Security annotations + +Security scan results are also a perfect candidate for [extra metadata]({{site.baseurl}}/docs/docker-registries/metadata-annotations/) to add to your Docker images. + +{% include image.html +lightbox="true" +file="/images/testing/security-scanning/security-annotations.png" +url="/images/testing/security-scanning/security-annotations.png" +alt="Security annotations" +caption="Security annotations" +max-width="80%" +%} + +You can add any metadata such as the number of issues for each category or even the URL the full report. This allows you to easily correlate docker images in Codefresh and security results of your scanning platform. + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Creating test reports]({{site.baseurl}}/docs/testing/test-reports/) +[Advanced workflows in pipelines]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/testing/sonarqube-integration.md b/_docs/testing/sonarqube-integration.md new file mode 100644 index 000000000..e33472042 --- /dev/null +++ b/_docs/testing/sonarqube-integration.md @@ -0,0 +1,140 @@ +--- +title: "SonarQube scanning" +description: "Trigger a SonarQube Analysis from Codefresh" +group: testing +toc: true +--- + +[SonarQube](https://www.sonarqube.org/){:target="\_blank"} is a popular platform for Code Quality. It can be used for static and dynamic analysis of a codebase to detect common code issues such as bugs and vulnerabilities. + + +{% include image.html +lightbox="true" +file="/images/testing/sonarqube/sonarqube-logo.png" +url="/images/testing/sonarqube/sonarqube-logo.png" +alt="SonarQube logo" +max-width="40%" +%} + +There are many ways to perform an [analysis with SonarQube](https://docs.sonarqube.org/latest/setup/overview/){:target="\_blank"}, but the easiest way is to use one that matches the build system of your application. + +This article shows how to use the [SonarQube plugin](https://codefresh.io/steps/step/sonar-scanner-cli){:target="\_blank"} on Codefresh from the plugin directory. Once it is set up, your code is automatically analysed everytime your pipeline runs. 
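+
+If your project builds with Maven, you can also trigger the analysis from a plain [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) with the SonarScanner for Maven, which follows the "use the scanner that matches your build system" advice above. This is only a sketch: the Maven image tag, host URL, and token are placeholders, and the project key and name are taken from your `pom.xml`:
+
+{% highlight yaml %}
+  sonar_maven_analysis:
+    title: "SonarQube analysis via Maven"
+    image: maven:3.8-openjdk-11   # any Maven image that suits your project
+    commands:
+      # fully qualified goal of the SonarScanner for Maven; replace the host URL and token with your own values
+      - mvn -B verify org.sonarsource.scanner.maven:sonar-maven-plugin:sonar -Dsonar.host.url=https://sonarcloud.io/ -Dsonar.login=insert-access-token
+{% endhighlight %}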
+
## Prerequisites for SonarQube scanning

Before starting an analysis, you need:

 * A simple [Codefresh pipeline up and running]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/)
 * A SonarQube account (Developer, Enterprise, or [SonarCloud](https://sonarcloud.io/){:target="\_blank"})

## Get a security token from SonarQube

To use the SonarQube plugin, you need to provide either your login credentials or a security token in your Codefresh pipeline. We recommend the latter. You can either create a new token or reuse an existing one; security-wise, it is best if each project has its own token.

1. Log in to your account in SonarQube.
1. Navigate to **USER > MY ACCOUNT**, which is on the top-right corner of your profile.
1. Select the **Security** tab, and generate the security token.
1. Save the token where you can access it again easily.

{% include image.html
lightbox="true"
file="/images/testing/sonarqube/generate-token.png"
url="/images/testing/sonarqube/generate-token.png"
alt="SonarQube generate token"
max-width="50%"
%}

## Set up sonar-project.properties file

Set up a `sonar-project.properties` file in your root directory. This is needed because not all environment variables are currently [automatically defined](https://github.com/SonarSource/sonar-scanner-cli-docker/pull/50){:target="\_blank"} in the SonarScanner.

1. Create a `sonar-project.properties` file.
1. Add the following values:

{% highlight yaml %}
# must be unique in a given SonarQube instance
sonar.projectKey=a unique project key

# project name
sonar.projectName=your project's name
{% endhighlight %}

The file is needed to run the SonarQube plugin.

**Language-specific settings**
Note that projects using specific languages may require additional configuration. For more information, see the appropriate language page in the [SonarQube documentation](https://docs.sonarqube.org/latest/analysis/languages/overview/){:target="\_blank"}.


## Run an analysis from the Codefresh plugin

If you are using the predefined Codefresh pipeline, look up SonarQube under `STEPS` to find the custom plugin.

{% include image.html
lightbox="true"
file="/images/testing/sonarqube/simplified-codefresh-pipeline.png"
url="/images/testing/sonarqube/simplified-codefresh-pipeline.png"
alt="SonarQube analysis for predefined Codefresh steps"
max-width="80%"
%}

* Select the `sonar-scanner-cli` step.
* Copy and paste the step into your pipeline.
* Customize the values within the step as follows:
  * `SONAR_HOST_URL: 'https://sonarcloud.io/'`: The URL of SonarCloud; if you host SonarQube yourself, replace it with your server URL.
  * `SONAR_LOGIN`: Your username, or the access token generated above.
  * `SONAR_PASSWORD`: Your password, required only if you use a username instead of a token.
  * `SONAR_PROJECT_BASE_DIR`: The working directory for the analysis.
  * `SONAR_SCANNER_CLI_VERSION`: The scanner version, for example `latest`.
* Save and run your pipeline.


Here is our example step:

{% highlight yaml %}
  sonarqube:
    type: "sonar-scanner-cli"
    stage: "push"
    arguments:
      SONAR_HOST_URL: 'https://sonarcloud.io/' # replace with your host URL
      SONAR_LOGIN: "insert access token" # replace with your access token
      SONAR_PROJECT_BASE_DIR: "/codefresh/volume/sonarqube-example" # replace with your working directory
      SONAR_SCANNER_CLI_VERSION: "latest"
{% endhighlight %}

## View the SonarQube analysis

Once the Codefresh build starts, check the logs and monitor the progress of the analysis.
+ +{% include image.html +lightbox="true" +file="/images/testing/sonarqube/analysis-log.png" +url="/images/testing/sonarqube/analysis-log.png" +alt="SonarQube analysis" +max-width="80%" +%} + +Once the analysis is complete, go to the SonarQube dashboard and see the recent analysis of the project. + +{% include image.html +lightbox="true" +file="/images/testing/sonarqube/sonar-project.png" +url="/images/testing/sonarqube/sonar-project.png" +alt="SonarQube project" +max-width="80%" +%} + +Then you can drill down and view the various statistics. + +{% include image.html +lightbox="true" +file="/images/testing/sonarqube/sonar-analysis-details.png" +url="/images/testing/sonarqube/sonar-analysis-details.png" +alt="SonarQube Analysis details" +max-width="80%" +%} + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) diff --git a/_docs/testing/test-reports.md b/_docs/testing/test-reports.md new file mode 100644 index 000000000..7511ae9ee --- /dev/null +++ b/_docs/testing/test-reports.md @@ -0,0 +1,556 @@ +--- +title: "Creating test reports" +description: "Create and view test reports in Codefresh" +group: testing +redirect_from: + - /docs/configure-ci-cd-pipeline/test-reports/ +toc: true +--- + +Codefresh offers the capability to store test results for every build and view them at any point in time. + +Currently, Codefresh supports storing test reports in: +* [Google buckets](https://cloud.google.com/storage/docs/key-terms#buckets){:target="\_blank"} +* [S3 buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html){:target="\_blank"} +* [Azure Storage](https://docs.microsoft.com/en-us/azure/storage/){:target="\_blank"} +* [MinIO objects](https://min.io/){:target="\_blank"} + +## Test report modes + +There are two modes for processing test reports in Codefresh, built-in and custom test reporting + +1. Built-in test reporting based on the [Allure framework](http://allure.qatools.ru/){:target="\_blank"} + Allure is an open-source test framework that can produce HTML reports like the following: + + {% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/sample-test-report.png" +url="/images/pipeline/test-reports/sample-test-report.png" +alt="Sample Allure test report" +caption="Sample Allure test report" +max-width="70%" +%} + + For more details, see the [official Allure documentation](https://docs.qameta.io/allure/){:target="\_blank"}. + Allure supports popular testing frameworks such as: + * Java/JUnit/TestNG/Cucumber + * Python/pytest + * JavaScript/Jasmine/Mocha + * Ruby/Rspec + * Groovy/Spock + * .NET/Nunit/mstest + * Scala/Scalatest + * PHP/PhpUnit + +{:start="2"} +1. Custom reporting for any static website (HTML) content + If you use the custom reporting mode, you can select any kind of tool that you want, as long as it produces a static website in the end. You can also use the custom reporting mode for reports that are not test reports, such as security reports or quality reports. + +## Connecting your storage account + +As a first step, you need a cloud bucket to store your test results. You can use Google, AWS, Azure or MinIO for this purpose. +Codefresh creates subfolders in the bucket with the names from every build ID. It will then upload the reports for that build to the respective folder. 
Multiple pipelines can use the same bucket. + +1. In the Codefresh UI, on the toolbar, click the Settings icon, and then from the sidebar select **Pipeline Integrations**. +1. Scroll down to **Cloud Storage**, and click **Configure**. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/cloud-storage-integrations.png" +url="/images/pipeline/test-reports/cloud-storage-integrations.png" +alt="Cloud storage Integrations" +caption="Cloud storage Integrations" +max-width="80%" +%} + +1. Click **Add Cloud Storage**, and select your cloud provider. +1. Continue to define cloud settings according to your cloud provider, as described in the sections that follow. + +### Connecting a Google bucket + +1. Create a bucket either from the Google cloud console or the `gsutil` command line tool. + See the [official documentation](https://cloud.google.com/storage/docs/creating-buckets#storage-create-bucket-console){:target="\_blank"} for the exact details. +1. [Connect your storage account](#connecting-your-storage-account) for **Google Cloud Storage**. + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/cloud-storage-google.png" +url="/images/pipeline/test-reports/cloud-storage-google.png" +alt="Google cloud storage" +caption="Google cloud storage" +max-width="80%" +%} + +{:start="3"} +1. Define the settings: + * Select **OAuth2** as the connection method, which is the easiest way. + * Enter an arbitrary name for your integration. + * Select **Allow access to read and write into storage** as Codefresh needs to both write to and read from the bucket. +1. Click **Save**. +1. When Codefresh asks for extra permissions from your Google account, accept the permissions. + +The integration is ready. You will use the name of the integration as an environment variable in your Codefresh pipeline. + +> An alternative authentication method is to use **JSON Config** with a [Google service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey){:target="\_blank"}. + In that case, download the JSON file locally and paste its contents in the **JSON config** field. + For more information, see the [official documentation](https://cloud.google.com/iam/docs/creating-managing-service-account-keys){:target="\_blank"}. + +### Connecting an S3 bucket + +1. For AWS (Amazon Web Services), create an S3 bucket. + See the [official documentation](https://docs.aws.amazon.com/quickstarts/latest/s3backup/step-1-create-bucket.html){:target="\_blank"} or the [CLI](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-bucket.html){:target="\_blank"}. + +1. Note down the **Access** and **Secret** keys. +1. [Connect your storage account](#connecting-your-storage-account) for **Amazon Cloud Storage**. +1. Define the settings: + * Enter an arbitrary name for your integration. + * Paste the **AWS Access Key ID** and **AWS Secret Access Key**. +1. Click **Save**. + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/cloud-storage-s3.png" +url="/images/pipeline/test-reports/cloud-storage-s3.png" +alt="S3 cloud storage" +caption="S3 cloud storage" +max-width="80%" +%} + +You will use the name of the integration as an environment variable in your Codefresh pipeline. 
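
For example, a reporting step later in the pipeline would reference this integration and bucket through environment variables, as in the following sketch (the bucket and integration names are placeholders for the values you chose above):

{% highlight yaml %}
{% raw %}
  unit_test_reporting_step:
    title: Upload test reports
    image: codefresh/cf-docker-test-reporting
    working_directory: '${{CF_VOLUME_PATH}}/'
    environment:
      - BUCKET_NAME=my-s3-bucket                 # the S3 bucket you created
      - CF_STORAGE_INTEGRATION=my-s3-integration # the integration name defined above
{% endraw %}
{% endhighlight %}

The same pattern applies to all storage providers described on this page; only the integration name changes.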
+
You can also use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/pipeline-integrations/secret-storage/) (such as Kubernetes secrets), as values, by clicking the lock icon that appears next to the field:
* If you have already specified the resource field during secret definition, just enter the name of the secret directly in the text field, for example, `my-secret-key`.
* If you didn't include a resource name during secret creation, enter the full name in the field, for example, `my-secret-resource@my-secret-key`.

### Connecting Azure storage

1. For Azure, create a storage account.
   See the [official documentation](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create){:target="\_blank"}.
1. Find one of the [two access keys](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage){:target="\_blank"} already created.
1. Note down the **Account Name** and **Access key for the account**.
1. [Connect your storage account](#connecting-your-storage-account) for **Azure File/Blob Storage**.
1. Define the settings:
  * Enter an arbitrary name for your integration.
  * Paste the **Azure Account Name** and **Azure Account Key**.
1. Click **Save**.


{% include image.html
lightbox="true"
file="/images/pipeline/test-reports/cloud-storage-azure.png"
url="/images/pipeline/test-reports/cloud-storage-azure.png"
alt="Azure cloud storage"
caption="Azure cloud storage"
max-width="60%"
%}

You will use the name of the integration as an environment variable in your Codefresh pipeline.

You can also use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/pipeline-integrations/secret-storage/) (such as Kubernetes secrets), as values, by clicking the lock icon that appears next to the field:
* If you have already specified the resource field during secret definition, just enter the name of the secret directly in the text field, for example, `my-secret-key`.
* If you didn't include a resource name during secret creation, enter the full name in the field, for example, `my-secret-resource@my-secret-key`.

### Connecting MinIO storage

1. Configure the MinIO server.
   See the [official documentation](https://docs.min.io/docs/minio-quickstart-guide.html){:target="\_blank"}.
1. Copy the Access and Secret keys; you need them to define the settings for MinIO cloud storage in your Codefresh account.
1. [Connect your storage account](#connecting-your-storage-account) for **MinIO Cloud Storage**.
1. Define the settings:
  * **NAME**: The name of the MinIO storage. Any name that is meaningful to you.
  * **ENDPOINT**: The URL to the storage service object.
  * **PORT**: Optional. The TCP/IP port number. If not defined, defaults to port `80` for HTTP, and `443` for HTTPS.
  * **Minio Access Key**: The ID that uniquely identifies your account, similar to a user ID.
  * **Secret Minio Key**: The password of your account.
  * **Use SSL**: Select to enable secure HTTPS access. Not selected by default.
1. Click **Save**.
+ + {% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/cloud-storage-minio.png" +url="/images/pipeline/test-reports/cloud-storage-minio.png" +alt="MinIO cloud storage" +caption="MinIO cloud storage" +max-width="60%" +%} + +### Integration example in Codefresh pipeline +See an example of the integration in a pipeline: +```yaml +version: "1.0" +stages: + - "clone" + - "test" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "https://github.com/vadim-kharin-codefresh/test/" + revision: "master" + stage: "clone" + unit_test_reporting_step: + title: Upload Mocha test reports + image: codefresh/cf-docker-test-reporting + working_directory: "${{clone}}" + stage: "test" + environment: + - REPORT_DIR=mochawesome-report + - REPORT_INDEX_FILE=mochawesome.html + - BUCKET_NAME=codefresh-test-reporting + - CF_STORAGE_INTEGRATION=minio + - CF_BRANCH_TAG_NORMALIZED=test +``` + + +## Producing Allure test reports from Codefresh pipelines + +In order to obtain test reports with Allure, the general process of a pipeline is the following: + +1. Generate test results using Allure and store them in a folder named `allure-results` (which is the default name). +1. Copy this folder to the [Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) to make it available to the next pipeline step. +1. Use the special `cf-docker-test-reporting` pipeline step with a working directory for the folder that contains the `allure-results` subfolder. + +Let's see these requirements in order: + +### Collecting the Allure test results + +The first step is to run your unit/integration tests using Allure to gather the results. +The process is different for every programming language. Follow the [official Allure documentation](https://docs.qameta.io/allure/){:target="\_blank"}. You can also take a look at any of the [examples](https://github.com/allure-examples){:target="\_blank"}. + +By default, Allure creates a folder named `allure-results` containing all the tests. The Codefresh reporting step looks for that folder to upload it to the cloud storage. If you change the default name, then you also need to add an extra parameter in the Codefresh reporting step. + +To pass the reports to the next step, you need to place them anywhere in the [Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) that is automatically shared between all Codefresh steps. + +>You can also leave the test results in the project folder that was checked out from Git, as this folder is already inside the shared Codefresh volume. + +Therefore, once you create the reports with your testing framework, make sure to copy them to `{% raw %}${{CF_VOLUME_PATH}}{% endraw %}` which is the [Codefresh variable]({{site.baseurl}}/docs/pipelines/variables/) that points to the shared volume. + +An example for Node tests would be the following: + +{% highlight yaml %} +{% raw %} +running_tests: + image: node + title: Running Unit tests + commands: + - npm test + - cp -r -f ./allure-results $CF_VOLUME_PATH/allure-results +{% endraw %} +{% endhighlight %} + +Here the tests are executed and then they are copied to `/codefresh/volume/allure-results` + + +### Creating the Allure test reports + + +Once the test results are collected, the next step is the same, regardless of your programming language and test framework. 
+ +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + title: Generate test reporting + image: codefresh/cf-docker-test-reporting + working_directory: '${{CF_VOLUME_PATH}}/' + environment: + - BUCKET_NAME=my-bucket-name + - CF_STORAGE_INTEGRATION=google +{% endraw %} +{% endhighlight %} + +Here, we execute the special `cf-docker-test-reporting` image as a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). The important point is that this image searches for `allure-results` in its working directory. This is why we pass `/codefresh/volume/` as the working directory as this is the parent folder of the test results. + +The required environment variables are: + * `BUCKET_NAME`, the name of the bucket that you created in your cloud provider. Multiple pipelines can use the same bucket. + * `CF_STORAGE_INTEGRATION`, the name of the cloud integration you created in the Codefresh UI. + +If you used another directory name, you can configure the test reporting step to include `ALLURE_DIR`, as in the example below: + +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + title: Generate test reporting + image: codefresh/cf-docker-test-reporting + working_directory: '${{CF_VOLUME_PATH}}/' + environment: + - ALLURE_DIR=my-own-allure-results-folder + - BUCKET_NAME=my-bucket-name + - CF_STORAGE_INTEGRATION=google +{% endraw %} +{% endhighlight %} + + +Once that is done, the results are uploaded to Codefresh infrastructure. You can access +the report for any build by clicking **Test Report** to the right of each build. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/test-report-button.png" +url="/images/pipeline/test-reports/test-report-button.png" +alt="Test report button" +caption="Test report button" +max-width="80%" +%} + +Note that behind the scenes, Codefresh automatically handles Allure history for you. For each test run, Codefresh finds the historical results from previous runs, and recreates them. Codefresh handles all folders inside the storage bucket, so make sure not to tamper with them. Make sure also that the account/role you are using for the bucket has delete privileges. + +## Using the custom mode for generic reporting + +If you don't want to use Allure or wish to create some other kind of report, you can use the alternative mode for the Codefresh reporting step. + +Here is an example for a custom reporting step via [Mocha](https://mochajs.org/){:target="\_blank"}. The reports are placed in the folder `/codefresh/volume/demochat/mochawesome-report`. + + +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + title: Upload Mocha test reports + image: codefresh/cf-docker-test-reporting + working_directory: /codefresh/volume/demochat/ + environment: + - REPORT_DIR=mochawesome-report + - REPORT_INDEX_FILE=mochawesome.html + - BUCKET_NAME=my-bucket-name + - CF_STORAGE_INTEGRATION=google +{% endraw %} +{% endhighlight %} + +The environment variables are: + * `BUCKET_NAME`, the name of the bucket that you created in your cloud provider. + * `CF_STORAGE_INTEGRATION`, the name of the cloud integration you created in the Codefresh UI. + * `REPORT_PATH`, the subfolder name in the bucket for each test report. + * Data is saved to the bucket in following path: `{bucketName}/{pipelineId}/{REPORT_PATH}/{branchName}/{buildId}/` + * `REPORT_DIR`, the name of the folder to be uploaded. + * `REPORT_INDEX_FILE`, the name of file to serve as the index file. 
+ +In the above example, we define a non-Allure report directory and the file that serves as the index file. +Here is the result: + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/mocha-tests.png" +url="/images/pipeline/test-reports/mocha-tests.png" +alt="Custom reporting" +caption="Custom reporting" +max-width="70%" +%} + +In a similar manner, you can upload reports from any other custom tool you have in your pipeline. + +If your report is only one file, simply use the `REPORT_INDEX_FILE` environment variable on its own, as below: + +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + title: Upload single html file report + image: codefresh/cf-docker-test-reporting + working_directory: /codefresh/volume/my-app/ + environment: + - REPORT_INDEX_FILE=my-test-report/my-result.html + - BUCKET_NAME=my-bucket-name + - CF_STORAGE_INTEGRATION=google +{% endraw %} +{% endhighlight %} + + + +### Cleaning the reports from the previous run + +In the typical scenario, the tests are run, the results are collected and saved in a folder, and then Codefresh creates the report. + +If something goes wrong with the actual tests, once the Codefresh reporting step runs, it actually picks the old reports from the previous build. Remember that everything that is placed in the Codefresh volume is not only shared between build steps, but also persists between different builds of the same pipeline for caching purposes. + +If that is a problem for you, pass the extra `CLEAR_TEST_REPORT` environment variable to the reporting step. This deletes the previous test results once uploaded, so are not available to the subsequent build. + +Here is an example: + +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + title: Upload Mocha test reports + image: codefresh/cf-docker-test-reporting + working_directory: /codefresh/volume/demochat/ + environment: + - REPORT_DIR=mochawesome-report + - REPORT_INDEX_FILE=mochawesome.html + - CLEAR_TEST_REPORT=true + - BUCKET_NAME=my-bucket-name + - CF_STORAGE_INTEGRATION=google +{% endraw %} +{% endhighlight %} + +>In the Allure reporting mode, the test results are automatically cleared by Codefresh. There is no need to manually define the `CLEAR_TEST_REPORT` variable. + +## Creating multiple reports + +You can create multiple reports from a single pipeline. As an example, you can create +a single pipeline that creates two reports, one for code coverage, and the other one for security vulnerabilities. + +To achieve this, you only need to repeat the variables mentioned in this article with an index number that matches them to the report, `REPORT_DIR.0`, `REPORT_DIR.1`, `REPORT_DIR.2` and so on. + +The following variables can be indexed: + * `REPORT_DIR` + * `REPORT_INDEX_FILE` + * `ALLURE_DIR` + * `CLEAR_TEST_REPORT` + * `REPORT_TYPE` (explained later on) + +Here is an example of a pipeline that creates two reports, one for code coverage, and one for unit tests. Notice the index number (`.0` and `.1`) used in the variables. 
+ + +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + title: Upload Mocha test reports + image: codefresh/cf-docker-test-reporting + working_directory: /codefresh/volume/demochat/ + environment: + - BUCKET_NAME=codefresh-test-report + - CF_STORAGE_INTEGRATION=testReporting + - REPORT_DIR.0=coverage + - REPORT_INDEX_FILE.0=lcov-report/index.html + - REPORT_TYPE.0=coverage + - ALLURE_DIR.1=allure-results + - REPORT_TYPE.1=allure +{% endraw %} +{% endhighlight %} + +This is the top-level HTML file created by the reporting step: + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/multiple-test-reports.png" +url="/images/pipeline/test-reports/multiple-test-reports.png" +alt="Multiple test reports" +caption="Multiple test reports" +max-width="60%" +%} + +The icons shown are specified by the `REPORT_TYPE` variable. The following options are possible: `allure, mocha, spock, coverage, junit, testng, cucumber, pytest, rspec, phpunit, nunit, spectest`. + +If you don't provide a `REPORT_TYPE`, Codefresh uses a default icon. + + +## Getting results from tests that fail + +By default, if unit tests fail, the pipeline stops execution. If you want the pipeline to keep running even if the tests fail, +add the [fail_fast property]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#execution-flow) to the pipeline and set it to `false`. + +Here is an example: + +{% highlight yaml %} +{% raw %} + RunMyUnitTests: + image: node:latest + title: Running my UnitTests + fail_fast: false + commands: + - npm run test +{% endraw %} +{% endhighlight %} + +The pipeline continue its run, and any steps later in the pipeline that collect reports, also run as usual, with access to test results. + +## Marking the whole pipeline as failed if tests failed + +When you use the `fail_fast:false` property in your pipeline, the pipeline "succeeds" even if the tests fail, because test results are essentially ignored. + +To fail the pipeline when tests fail, use [conditional execution]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/). + +As the last step in your pipeline, add the following step: + +{% highlight yaml %} +{% raw %} + MarkMyPipelineStatus: + image: alpine:latest + title: Marking pipeline status + commands: + - echo "Unit tests failed" + - exit 1 + when: + condition: + all: + myCondition: RunMyUnitTests.result == 'failure' +{% endraw %} +{% endhighlight %} + +This step checks the result of your unit tests, and stops the whole pipeline by exiting with an error, if the tests fail. +Replace `RunMyUnitTests` with the name of your step that runs unit tests. + +Here is a full pipeline example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + RunMyUnitTests: + image: alpine:latest + title: Running my UnitTests that will fail + fail_fast: false + commands: + - exit 1 #simulate test fail + CollectingMyTestresults: + image: alpine:latest + title: Collecting test results + commands: + - echo "collecting/copy test results" + MarkMyPipelineStatus: + image: alpine:latest + title: Checking Unit test result + commands: + - echo "Unit tests failed, marking the whole pipeline as failed" + - exit 1 + when: + condition: + all: + myCondition: RunMyUnitTests.result == 'failure' +{% endraw %} +{% endhighlight %} + +If you run this pipeline, you will see: + +1. The `RunMyUnitTests` will fail but the pipeline will continue +1. The `CollectingMyTestresults` step will always run even if tests fail +1. 
The `MarkMyPipelineStatus` step will mark the whole pipeline as failed + +## Running the test reporting step in parallel mode + +Test reporting works well with the [parallel pipeline mode]({{site.baseurl}}/docs/pipelines/advanced-workflows/), where each step +is evaluated any time there is a change in the workflow. + +Here is how you can define the test reporting step to run regardless of pipeline result: + +{% highlight yaml %} +{% raw %} + unit_test_reporting_step: + [...] + when: + condition: + any: + mySuccessCondition: workflow.status == 'success' + myFailureCondition: workflow.status == 'failure' +{% endraw %} +{% endhighlight %} + +See [handling errors in a pipeline]({{site.baseurl}}/docs/pipelines/advanced-workflows/#handling-error-conditions-in-a-pipeline) for more details. + + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Parallel workflows in pipelines]({{site.baseurl}}/docs/pipelines/advanced-workflows/) + diff --git a/_docs/testing/unit-tests.md b/_docs/testing/unit-tests.md new file mode 100644 index 000000000..eb387f667 --- /dev/null +++ b/_docs/testing/unit-tests.md @@ -0,0 +1,295 @@ +--- +title: "Unit tests" +description: "Run unit tests in Codefresh pipelines" +group: testing +redirect_from: + - /docs/unit-tests/ +toc: true +--- +Easily run unit tests for every commit or pull request. + +>For the purposes of this article, "unit tests" are the tests that use only the source code of the application and nothing else. If you are interested in running tests with external services (such as databases), then see [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). + +Different companies have different types of unit tests, and in several cases, the type of programming language also affects when/what tests are run. Codefresh supports all testing frameworks (including mocking frameworks) for all popular programming languages. + +Here we will see four ways of running unit tests in Codefresh: + +1. Running unit tests in a Dockerfile (recommended only for smoke tests) +1. Running unit tests with an external image (best for traditional/simple applications) +1. Running unit tests in the application image (not recommended, but very popular) +1. Running unit tests using a special testing image (the recommended solution for complex applications) + +For an example application for 2 and 3, see [unit test examples]({{site.baseurl}}/docs/example-catelog/ci-examples/run-unit-tests/). + +## Running unit tests as part of a Docker build + +A handy way to quickly test a Docker image is by placing one or more smoke tests in the Dockerfile itself. The unit +tests are executed when the image is built, and if they fail, the image is not created. + +Here is an example: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM python:3.6.4-alpine3.6 + +ENV FLASK_APP=minitwit +COPY . /app +WORKDIR /app + +RUN pip install --editable . +RUN flask initdb + +# Unit tests +RUN python setup.py test + +EXPOSE 5000 +CMD [ "flask", "run", "--host=0.0.0.0" ] +{% endraw %} +{% endhighlight %} + +This kind of unit test is transparent to Codefresh. The unit tests just execute in a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) in the same manner as you would build the image on your workstation. 
+ +{% + include image.html + lightbox="true" + file="/images/testing/unit-testing/unit-tests-in-dockerfile.png" + url="/images/testing/unit-testing/unit-tests-in-dockerfile.png" + alt="Unit tests inside a Dockerfile" + caption="Unit tests inside a Dockerfile" + max-width="80%" +%} + +A big disadvantage of this unit testing method is that getting reports from the Docker image is not a straightforward process. On the other hand, such unit tests are very easy to integrate in any workflow. The Codefresh pipeline simply checks out the code and builds a Dockerfile. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/python-flask-sample-app' + revision: 'with-tests-in-dockerfile' + stage: prepare + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-app-image + working_directory: ./ + tag: with-tests-in-dockerfile + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This technique is best used for a very small subset of unit tests that check the overall well-being of a Docker image. The bulk of the tests should be executed outside the Docker build process as we will see in the sections that follow. + +## Running unit tests using an external Docker image + +The recommended way to run unit tests in Codefresh pipelines is to select a Docker image that has all the test tools that you need, and define an explicit testing step in your pipeline, usually a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + +Here is an example where unit tests are run using a JDK/Maven image: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - package +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + MyUnitTests: + title: JUnit tests + stage: test + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository test + MyAppDockerImage: + title: Building Docker Image + type: build + stage: package + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'non-multi-stage' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +The main advantage of this approach is that you can easily replicate your test environment in the Codefresh pipeline by selecting the appropriate image for your tests. You also get a clear overview on the test results. If they fail, the pipeline automatically stops execution. You can change this behavior with the [fail_fast]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#execution-flow) property. + +{% + include image.html + lightbox="true" + file="/images/testing/unit-testing/unit-tests-with-external-image.png" + url="/images/testing/unit-testing/unit-tests-with-external-image.png" + alt="Unit tests with external Docker image" + caption="Unit tests with external Docker image" + max-width="80%" +%} + +Notice that even if the example above eventually creates a Docker image, you can still use this way of running unit tests for traditional applications that are not dockerized yet, such as VM-based applications. + +## Running unit tests with the application Docker image + +In several cases, especially with dynamic languages, you can reuse the Docker image that holds the application also for unit tests. 
This is a very common technique for Node, Python, and Ruby applications. +In this case, you can use [context variables]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables) in Codefresh to run a unit test step in the image that was created in a previous step: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - test +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/python-flask-sample-app' + revision: 'master' + git: github + stage: prepare + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-app-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + stage: test + image: '${{MyAppDockerImage}}' + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +We use a [Codefresh variable]({{site.baseurl}}/docs/pipelines/variables/) as the value of the `image` property in the last step. This will make the unit test execute in the same Docker container that was created in the second step of the pipeline. + +{% + include image.html + lightbox="true" + file="/images/testing/unit-testing/unit-tests-with-app-image.png" + url="/images/testing/unit-testing/unit-tests-with-app-image.png" + alt="Reusing the app image for unit tests" + caption="Reusing the app image for unit tests" + max-width="80%" +%} + +This technique is certainly useful, but can be easily abused if you end up shipping testing tools in your production image (which is not recommended). If you find your production images filled with test tools and libraries, it is better to use the technique in the next section which uses a different image for tests. + + +## Running unit tests with a dynamic Docker image + +The ultimate method of running unit tests in Codefresh is by creating a specific image, dedicated to unit tests. If Docker Hub doesn't already contain an image that suits you, you should create your own. + +This means that your application has *two Dockerfiles*. The main one that holds the application as a deployment artifact, and another one that holds all the unit test libraries and tools that you need. + +Here is an example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - 'Test tools' + - test + - build +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/demochat' + revision: 'master' + git: github + stage: prepare + MyUnitTestDockerImage: + title: Building Test image + type: build + stage: 'Test tools' + image_name: my-test-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile.dev + MyUnitTests: + title: Running Unit tests + stage: test + image: '${{MyUnitTestDockerImage}}' + commands: + - npm run test + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-app-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Here we create two Docker images: + +1. The first docker image is created from `Dockerfile.dev`. + Unit tests run in the context of that image (`MyUnitTestDockerImage`). +1. The production application uses another Dockerfile. 
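
As an illustration only, and assuming a Node.js application like the one in this example (the base image and commands below are placeholders, not taken from the actual repository), a dedicated `Dockerfile.dev` could look like this:

{% highlight docker %}
{% raw %}
FROM node:14-alpine

WORKDIR /usr/src/app

# Install runtime *and* development/test dependencies
COPY package*.json ./
RUN npm install

# Copy the source code so that `npm run test` can run inside this image
COPY . .
{% endraw %}
{% endhighlight %}

The production `Dockerfile` would omit the test-only dependencies, so nothing from the test toolchain ends up in the deployed image.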
+ +{% + include image.html + lightbox="true" + file="/images/testing/unit-testing/unit-tests-with-dedicated-image.png" + url="/images/testing/unit-testing/unit-tests-with-dedicated-image.png" + alt="Dedicated unit test image" + caption="Dedicated unit test image" + max-width="80%" +%} + +This is one of the best ways to run unit tests (as well as integration tests), as it allows you to fine-tune the test environment while still shipping only what is needed to production. + +In the example above, we used two different Dockerfiles, but you could also use a single Dockerfile with multi-stage builds. Use the `target` directive to stop the image build process at a previous layer that has all the testing tools. + +## Creating test reports + +All the methods mentioned above for running unit tests, apart from the first method, can also be used for reporting. Read all about test results and graphs in [test reports]({{site.baseurl}}/docs/testing/test-reports/). + +{% include +image.html +lightbox="true" +file="/images/pipeline/test-reports/sample-test-report.png" +url="/images/pipeline/test-reports/sample-test-report.png" +alt="Sample Allure test report" +caption="Sample Allure test report" +max-width="70%" +%} + + + +## Related articles +[Unit test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-unit-tests/) +[Introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[On demand environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) + + + + + diff --git a/_docs/troubleshooting/common-issues.md b/_docs/troubleshooting/common-issues.md index 525a88daf..700d6d69b 100644 --- a/_docs/troubleshooting/common-issues.md +++ b/_docs/troubleshooting/common-issues.md @@ -9,9 +9,43 @@ toc: true --- In this section, we offer helpful information about issues you may encounter and questions you might have. 
-## General usage issues -- [Non admin users can are thrown out back to classic Codefresh]({{site.baseurl}}/docs/troubleshooting/common-issues/non-admin-users-support/) +## Repository issues + +- [Can't find your organization repositories]({{site.baseurl}}/docs/troubleshooting/common-issues/cant-find-your-organization-repositories/) +- [Can't find your private repositories]({{site.baseurl}}/docs/troubleshooting/common-issues/cant-find-your-private-repositories/) +- [Clone step failed]({{site.baseurl}}/docs/troubleshooting/common-issues/git-clone-step-issue/) +- [Handling commit messages with quotes]({{site.baseurl}}/docs/troubleshooting/common-issues/handling-commit-messages-with-quotes/) + +## Docker issues + +- [The docker image does not exist or no pull access]({{site.baseurl}}/docs/troubleshooting/common-issues/the-docker-image-does-not-exist-or-no-pull-access/) +- [Build step: No such file or directory]({{site.baseurl}}/docs/troubleshooting/common-issues/build-step-no-such-file-or-directory/) +- [No Dockerfile found]({{site.baseurl}}/docs/troubleshooting/common-issues/no-dockerfile-found/) +- [Could not tag image](could-not-tag-image) +- [Failed to build image: non-zero code 137](error-code-137/) +- [Error pulling image configuration: toomanyrequests](dockerhub-rate-limit/) + +## Build/pipeline issues + +- [Restoring data from pre-existing image hangs on]({{site.baseurl}}/docs/troubleshooting/common-issues/restoring-data-from-pre-existing-image-hangs-on/) +- [Disabling codefresh caching mechanisms]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) +- [Pinning codefresh.yml for multi-git triggers]({{site.baseurl}}/docs/troubleshooting/common-issues/multi-git-triggers/) +- [Workflow process terminated by the system]({{site.baseurl}}/docs/troubleshooting/common-issues/workflow-terminated-by-system/) +- [Multi-line variable gets truncated with cf_export](cf-export-limitations) + +## Dynamic environment issues + +- [Validation Port warnings]({{site.baseurl}}/docs/troubleshooting/common-issues/validation-port-warnings/) + +## Deployment issues + +- [Forbidden Cluster Resources]({{site.baseurl}}/docs/troubleshooting/common-issues/forbidden-cluster-resources/) +- [Failed to get accounts clusters during workflow]({{site.baseurl}}/docs/troubleshooting/common-issues/failed-to-get-accounts-clusters-during-workflow/) + +## API/CLI issues + +- [Paging does not work for images and builds]({{site.baseurl}}/docs/troubleshooting/common-issues/paging-issues-builds-images/) diff --git a/_docs/troubleshooting/common-issues/build-step-no-such-file-or-directory.md b/_docs/troubleshooting/common-issues/build-step-no-such-file-or-directory.md new file mode 100644 index 000000000..1a1d48d56 --- /dev/null +++ b/_docs/troubleshooting/common-issues/build-step-no-such-file-or-directory.md @@ -0,0 +1,61 @@ +--- +title: "Build step: No such file or directory" +description: "" +group: troubleshooting +sub_group: common-issues +permalink: /:collection/troubleshooting/common-issues/build-step-no-such-file-or-directory/ +redirect_from: + - /docs/build-step-no-such-file-or-directory/ +toc: true +--- + +## Issue + +Following error in the logs of the build step: + + `Text` +{% highlight text %} +Step 3/6 : COPY /output /app +lstat output: no such file or directory +{% endhighlight %} + +## Possible cause +This issue generally occurs if you use an incorrect path to the Dockerfile or to the Build context. 
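
As a hypothetical illustration (the folder names below are made up), with a repository laid out as follows, running the build with the repository root as the build context fails on `COPY /output /app`, because `output` lives under `service/` and not directly under the build context:

{% highlight text %}
.
├── codefresh.yml
├── Dockerfile
└── service/
    └── output/
        └── app.jar
{% endhighlight %}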
+ +## Solution +Recheck, and use the correct path to Dockerfile and Build context + +{:start="1"} +1. The path to Dockerfile. + +{:start="2"} +2. The path to Build context. +**Build context** is where we can find your Dockerfile as well as running commands. Your Dockerfile must be relative to this directory. + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/no_such_file_directory.png" +url="/images/troubleshooting/no_such_file_directory.png" +alt="codefresh_no_such_file_directory.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +In case with codefresh.yml, to specify the path to build context you need to use `working_directory` +{{site.data.callout.end}} + + `build step` +{% highlight yaml %} +step_name: + type: build + title: Step Title + description: Free text description + working_directory: path/to/buildcontext + dockerfile: path/to/Dockerfile + image_name: owner/new-image-name + tag: develop +{% endhighlight %} + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) \ No newline at end of file diff --git a/_docs/troubleshooting/common-issues/cant-find-your-organization-repositories.md b/_docs/troubleshooting/common-issues/cant-find-your-organization-repositories.md new file mode 100644 index 000000000..f96ec8ecf --- /dev/null +++ b/_docs/troubleshooting/common-issues/cant-find-your-organization-repositories.md @@ -0,0 +1,116 @@ +--- +title: "Can't find your organization repositories" +description: "Issues with adding Git triggers" +group: troubleshooting +sub_group: common-issues +redirect_from: + - /docs/cant-find-your-organization-repos/ +toc: true +--- +## Issue + +Unable to find the GitHub organization to with the repository to which to add a [git trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/). + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/cannot-find-repo.png" +url="/images/troubleshooting/cannot-find-repo.png" +alt="Repository not found" +max-width="60%" +caption="Repository not found" +%} + +### Solution + +1. Navigate to your GitHub user settings page: + 1. Log in to GitHub using your credentials. + 1. Navigate to your *Settings* page. + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/github-user-menu.png" +url="/images/troubleshooting/github-user-menu.png" +alt="GitHub user menu" +max-width="40%" +caption="GitHub user menu" +%} + +{:start="2"} +1. Navigate to your Authorized applications: + In your *Personal settings* view, click *Authorized applications*. + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/personal-settings.png" +url="/images/troubleshooting/personal-settings.png" +alt="Personal settings" +max-width="40%" +caption="Personal settings" +%} + +{:start="3"} +1. Locate and click the Codefresh authorized application. + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/authorized-applications.png" +url="/images/troubleshooting/authorized-applications.png" +alt="Authorized applications" +max-width="40%" +caption="Authorized applications" +%} + +{:start="4"} +1. In the Organization access section find your organization. +1. If you _do not have Admin privileges for your organization_, click **Request access**, to request the necessary privileges from your administrator. 
+ +{% include +image.html +lightbox="true" +file="/images/troubleshooting/request-access-to-codefresh-app.png" +url="/images/troubleshooting/request-access-to-codefresh-app.png" +alt="Request access" +max-width="40%" +caption="Request access" +%} + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/access-requested.png" +url="/images/troubleshooting/access-requested.png" +alt="Access requested" +max-width="40%" +caption="Access requested" +%} + +{:start="6"} +1. If _you are an organization administrator_, click **Grant access**. + You always have the option to revoke access. + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/non-admin-grant-access.png" +url="/images/troubleshooting/non-admin-grant-access.png" +alt="Grant access" +max-width="40%" +caption="Grant access" +%} + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/non-admin-access-granted.png" +url="/images/troubleshooting/non-admin-access-granted.png" +alt="Access granted" +max-width="40%" +caption="Access granted" +%} + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) \ No newline at end of file diff --git a/_docs/troubleshooting/common-issues/cant-find-your-private-repositories.md b/_docs/troubleshooting/common-issues/cant-find-your-private-repositories.md new file mode 100644 index 000000000..c3c02a953 --- /dev/null +++ b/_docs/troubleshooting/common-issues/cant-find-your-private-repositories.md @@ -0,0 +1,45 @@ +--- +title: "Can't find private repositories" +description: "" +group: troubleshooting +sub_group: common-issues +redirect_from: + - /docs/cant-find-your-private-repositories/ +toc: true +--- +## Issue +I want to add a private repository to Codefresh but can't find it in the repository list. + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/private-repos.png" +url="/images/troubleshooting/private-repos.png" +alt="Adding private repositories" +caption="Adding private repositories" +max-width="60%" +%} + +## Possible cause +This means that you haven’t granted Codefresh permission to access private repositories. + +## Solution +Click the link on the right that says "private repositories". + +You can also enable the same thing from the user settings page: + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/allow-private-repositories.png" +url="/images/troubleshooting/allow-private-repositories.png" +alt="Allow private repositories" +caption="Allow private repositories" +max-width="70%" +%} + + +> Some Git providers redirect you to the provider’s permission page. + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) diff --git a/_docs/troubleshooting/common-issues/cf-export-limitations.md b/_docs/troubleshooting/common-issues/cf-export-limitations.md new file mode 100644 index 000000000..12f89b5f3 --- /dev/null +++ b/_docs/troubleshooting/common-issues/cf-export-limitations.md @@ -0,0 +1,71 @@ +--- +title: "Multiline variable gets truncated with cf_export" +description: "Exporting multiline variables to subsequent pipeline steps" +group: troubleshooting +sub_group: common-issues +toc: true +--- + + + + +## Issue +Exporting a multi-line variable with `cf_export` within a pipeline truncates the variable. + +Running the pipeline in the example below, in the step `test` the value of the variable is truncated and only `line1` will appear. 
+
{% highlight yaml %}
{% raw %}
version: "1.0"
steps:
  assign:
    image: alpine
    commands:
      - export TEST=`echo "line1" && echo "line2" && echo "line3"`
      - echo $TEST
      - cf_export TEST
  test:
    image: alpine
    commands:
      - echo $TEST
{% endraw %}
{% endhighlight %}

Every Codefresh pipeline has access to the `cf_export` command that allows you to pass [environment variables]({{site.baseurl}}/docs/codefresh-yaml/variables/) from one step to the next.

## Solution

Encode the variable with `base64` to handle all special characters in the exported variable.

{% highlight yaml %}
{% raw %}
version: "1.0"
steps:
  assign:
    image: alpine
    commands:
      - apk add --update coreutils
      - export TEST=`echo "line1" && echo "line2" && echo "line3"`
      - echo $TEST
      - cf_export TEST=`echo $TEST | base64 -w 0`
  test:
    image: alpine
    commands:
      - echo $TEST
      - echo `echo $TEST | base64 -d`
{% endraw %}
{% endhighlight %}

If you run this pipeline, the `test` step correctly prints `line1 line2 line3`.


## Related articles
[cf_export utility in pipelines]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command)
[Passing variables between steps]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#using-docker-containers-as-build-tooling)
[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)


diff --git a/_docs/troubleshooting/common-issues/could-not-tag-image.md b/_docs/troubleshooting/common-issues/could-not-tag-image.md
new file mode 100644
index 000000000..bd71e3c19
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/could-not-tag-image.md
@@ -0,0 +1,30 @@
---
title: "Failed to tag image"
description: "Failed to update your image with promote event, could not tag image"
group: troubleshooting
sub_group: common-issues
toc: true
---

## Issue
[Push step]({{site.baseurl}}/docs/codefresh-yaml/steps/push/) in your pipeline fails with the following error:


```
[SYSTEM] Error: Failed to push image docker.io/example/my-image:1.0.0; caused by Error: Failed to update your image with promote event; caused by Error: NotFoundError: could not tag image
```

## Possible cause

This issue occurs because of a race condition when multiple builds try to push the same image at the same time [to a Docker registry]({{site.baseurl}}/docs/docker-registries/push-image-to-a-docker-registry/).
For a pipeline, if two builds are triggered at the same time for the same commit, one of them will pass and the other will fail.

## Solution

Review the trigger setup according to the pipeline logic, and verify that the pipeline is triggered only once when an event happens.

It is not possible to push the same image at the same time to the same registry. Your pipeline configuration probably has [multiple triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) (such as a commit and tag) that trigger the pipeline twice.
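
As a sketch of how the race can happen (the image, tag, and registry names below are placeholders), a push step that always uses the same fixed tag will conflict with a parallel build of the same commit, because both builds try to tag the identical image at the same moment:

{% highlight yaml %}
{% raw %}
  push_to_registry:
    title: Pushing image to registry
    type: push
    candidate: '${{build_my_image}}'
    image_name: example/my-image
    registry: dockerhub
    tags:
      - '1.0.0' # a static tag; two simultaneous builds race on this exact tag
{% endraw %}
{% endhighlight %}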
+ +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) + diff --git a/_docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms.md b/_docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms.md new file mode 100644 index 000000000..8ec90ae22 --- /dev/null +++ b/_docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms.md @@ -0,0 +1,41 @@ +--- +title: "Disabling codefresh caching mechanisms" +description: "" +group: troubleshooting +sub_group: common-issues +redirect_from: + - /docs/disabling-codefresh-caching-mechanisms/ +toc: true +--- + +Docker builds can be optimized by caching images and layers used during the build. Codefresh facilitates two kinds of caches: + +1. Docker engine (local) cache: building images that reuse layers or are based on images that already exists will benefit from the docker engine cache fulfilling those dependencies immediately (just like when building on your local workstation). Codefresh takes care of persisting the docker cache and making it available for the current pipeline execution environment. +2. Last build cache: Codefresh will intelligently pull the last image successfully built and will use that when building the image using Docker's `--cache-from` build option. This optimization reduces build times for most scenarios. + +You can choose to opt out of each cache mechanism, both at the step definition level, or temporarily at the build execution level. + +### Disabling temporarily for the current build execution + +In the Build execution dialog, click on the "Advanced Options" button, under "More Options", select "Ignore Codefresh cache optimizations for build" or "Ignore Docker engine cache for build". + + +{% include +image.html +lightbox="true" +file="/images/troubleshooting/cache-options.png" +url="/images/troubleshooting/cache-options.png" +alt="Cache options for a pipeline build" +caption="Cache options for a pipeline build" +max-width="50%" +%} + +Notice that these selections only affect the *specific* build that is launched from this dialog. +Any subsequent/automated builds will still use the default caching behavior. + +### Disabling consistently at the build definition + +In the build step YAML, set `no_cache: true` to disable docker local cache, and `no_cf_cache: true` to disable codefresh's additional optimizations such as `--cache-from`. + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) \ No newline at end of file diff --git a/_docs/troubleshooting/common-issues/dockerhub-rate-limit.md b/_docs/troubleshooting/common-issues/dockerhub-rate-limit.md new file mode 100644 index 000000000..d34d2fdc1 --- /dev/null +++ b/_docs/troubleshooting/common-issues/dockerhub-rate-limit.md @@ -0,0 +1,64 @@ +--- +title: "Error pulling image configuration: toomanyrequests" +description: "Too many requests to Docker Hub" +group: troubleshooting +sub_group: common-issues +toc: true +--- + +## Issue +Pipeline fails with the following error: + +``` +Continuing execution. +Pulling image codefresh/cfstep-helm:3.0.2 +error pulling image configuration: toomanyrequests: Too Many Requests. Please see https://docs.docker.com/docker-hub/download-rate-limit/ +``` +The image `codefresh/cfstep-helm` is just an example. This error can happen for other Docker images as well. + +Or, with this error message from Docker Hub: + +``` +You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit +``` + +## Possible cause + +This issue occurs because your pipeline has triggered the [Docker Hub limit](https://www.docker.com/blog/scaling-docker-to-serve-millions-more-developers-network-egress/){:target="\_blank"} announced in August 2020. + +Users who pull Docker images have the following limits: +* Free plan: Anonymous users: 100 pulls per 6 hours +* Free plan: Authenticated users: 200 pulls per 6 hours +* Pro plan: Unlimited +* Team plan: Unlimited + +> The limits depend on the [pricing plan](https://www.docker.com/pricing){:target="\_blank"} of the _user who performs the pull action_, and not the user who owns the Docker image. + + +If you don't have a Docker Hub integration in Codefresh, all your Docker images are pulled as an anonymous user and because Docker Hub [applies the rate limit for each IP address](https://docs.docker.com/docker-hub/download-rate-limit/), your whole Codefresh installation can easily hit the limits if you have many teams and users. + +## Solution + +* Add at least one Docker Hub integration in Codefresh, as described in [Docker Hub integrations]({{site.baseurl}}/docs/integrations/ci-integrations/docker-registries/docker-hub/). + +{% include image.html + lightbox="true" + file="/images/troubleshooting/two-dockerhub-integrations.png" + url="/images/troubleshooting/two-dockerhub-integrations.png" + alt="Docker Hub integrations in Codefresh" + caption="Docker Hub integrations in Codefresh" + max-width="90%" +%} + +This way, when Codefresh tries to pull an image, it uses the connected integration instead of sending anonymous requests. + +If the integration is for a Docker Hub pro/team plan, you have unlimited pulls. If the integration is for the free plan your rate limit is doubled. +We also advise to you add multiple Docker Hub integrations if it makes sense for your teams, as this action spreads the pull actions to multiple DockerHub accounts. + + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) + + + + diff --git a/_docs/troubleshooting/common-issues/error-code-137.md b/_docs/troubleshooting/common-issues/error-code-137.md new file mode 100644 index 000000000..25a70f4e0 --- /dev/null +++ b/_docs/troubleshooting/common-issues/error-code-137.md @@ -0,0 +1,47 @@ +--- +title: "Building image failed with exit code: 137" +description: "The command returned a non-zero code: 137" +group: troubleshooting +sub_group: common-issues +toc: true +--- + +## Issue +[Build step]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) in pipeline fails with the following error: + +``` +The command 'XXXXXXX' returned a non-zero code: 137 +[SYSTEM] + Message Failed to build image: r.cfcr.io/:my-tag + Caused by Container for step title: Building Docker Image, step type: build, operation: Building image + failed with exit code: 137 +``` + +## Possible cause + +This issue occurs where you are low on pipeline resources. The build step does not have enough memory to finish building. You can get an overview of your build resources by clicking in the [metrics]({{site.baseurl}}/docs/configure-ci-cd-pipeline/monitoring-pipelines/#viewing-pipeline-metrics) tab in the build screen. 
+ +{% include image.html +lightbox="true" +file="/images/troubleshooting/not-enough-resources/not-enough-memory.png" +url="/images/troubleshooting/not-enough-resources/not-enough-memory.png" +alt="Not enough memory" +caption="Not enough memory" +max-width="80%" +%} + +The error usually happens when Docker does not have enough memory, but it can also appear if there is not enough disk space. + +## Solution + +* [Set the build space for each pipeline build]({{site.baseurl}}/docs/pipelines/#runtime) . +* If that doesn't work, you need to either simplify your application, for example, split it to microservices, or run the pipeline on a larger machine. For example if the build fails on a `SMALL` machine you should run it on a `MEDIUM` one. + You can upgrade your account to get access to more resources by visiting your [Billing Settings](https://g.codefresh.io/account-admin/billing/). + + + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) + + + diff --git a/_docs/troubleshooting/common-issues/failed-to-get-accounts-clusters-during-workflow.md b/_docs/troubleshooting/common-issues/failed-to-get-accounts-clusters-during-workflow.md new file mode 100644 index 000000000..4fb2064f9 --- /dev/null +++ b/_docs/troubleshooting/common-issues/failed-to-get-accounts-clusters-during-workflow.md @@ -0,0 +1,29 @@ +--- +title: "Failed to get account clusters during workflow" +description: "" +group: troubleshooting +sub_group: common-issues +permalink: /:collection/troubleshooting/common-issues/failed-to-get-accounts-clusters-during-workflow/ +redirect_from: + - /docs/failed-to-get-accounts-clusters-during-workflow/ +toc: true +--- + +When a pipeline is running, Codefresh automatically sets up several [environment variables]({{site.baseurl}}/docs/pipelines/variables/) that are available to all pipeline steps. + +For Kubernetes clusters, if you have configured [at least one cluster]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/), the `KUBECONFIG` environment variable, containing the path to the kubeconfig file will become available in the pipeline. + +For more information about the `KUBECONFIG` environment variable, see the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable){:target="_blank"}. + +## Possible cause + +If your Kubernetes cluster appears in the Codefresh Dashboard (`Integrations` → `Kubernetes` → `Configure`) but is not accessible in a pipeline, then maybe: + +* The cluster is not accessible anymore via Codefresh. Try running `kubectl get node -owide` in your cloud shell to see the status of the nodes. +* The cluster has been deleted externally. + +## Solution +Contact us via the Intercom window on the bottom right of the Codefresh interface for additional assistance. 
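+
+Before contacting support, it can help to confirm what the pipeline itself sees. The following freestyle step is a minimal debugging sketch; the `codefresh/kubectl` image is only a suggestion, and any image that bundles `kubectl` works:
+
+`pipeline step`
+{% highlight yaml %}
+{% raw %}
+check_cluster_access:
+  title: 'Checking injected cluster contexts'
+  image: codefresh/kubectl
+  commands:
+    # KUBECONFIG is only injected when at least one cluster integration exists
+    - echo "KUBECONFIG is set to $KUBECONFIG"
+    - kubectl config get-contexts
+    - kubectl get nodes -o wide
+{% endraw %}
+{% endhighlight %}
+
+If no contexts are listed, the cluster integration itself is the likely problem; if the contexts appear but `kubectl get nodes` fails, the cluster is probably unreachable from Codefresh.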
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
\ No newline at end of file
diff --git a/_docs/troubleshooting/common-issues/forbidden-cluster-resources.md b/_docs/troubleshooting/common-issues/forbidden-cluster-resources.md
new file mode 100644
index 000000000..214d22eae
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/forbidden-cluster-resources.md
@@ -0,0 +1,64 @@
+---
+title: "Forbidden Kubernetes resources"
+description: "Cannot list namespaces or nodes in the Kubernetes dashboard"
+group: troubleshooting
+sub_group: common-issues
+toc: true
+---
+
+## Issue
+
+Errors in the Kubernetes dashboard view:
+
+{% include image.html
+lightbox="true"
+file="/images/troubleshooting/kubernetes-access/forbidden.png"
+url="/images/troubleshooting/kubernetes-access/forbidden.png"
+alt="Kubernetes access error"
+caption="Kubernetes access error"
+max-width="80%"
+%}
+
+OR
+
+Generic error:
+
+{% include image.html
+lightbox="true"
+file="/images/troubleshooting/kubernetes-access/unknown-error.png"
+url="/images/troubleshooting/kubernetes-access/unknown-error.png"
+alt="Kubernetes unknown error"
+caption="Kubernetes unknown error"
+max-width="80%"
+%}
+
+
+
+## Possible causes
+
+The service account you have connected in Codefresh does not have enough permissions for your cluster.
+
+Codefresh accesses your Kubernetes cluster via the standard Kubernetes API. As such, to work with the cluster, Codefresh needs correct [RBAC privileges](https://kubernetes.io/docs/reference/access-authn-authz/rbac/){:target="\_blank"}.
+
+## Solution
+
+To monitor the cluster, the service account used by Codefresh must have at least view privileges; to also deploy to the cluster, the service account requires additional privileges.
+
+Check the following:
+
+1. What service account Codefresh uses
+1. What role is assigned to this service account
+1. What access rights are possible with that role
+
+You can see the role access with the standard `kubectl` commands:
+
+```
+kubectl get clusterrole codefresh-role -o yaml
+```
+
+Make sure that the privileges are at least those described in the [integration page]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/#the-propersecure-way).
+
+
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
\ No newline at end of file
diff --git a/_docs/troubleshooting/common-issues/git-clone-step-issue.md b/_docs/troubleshooting/common-issues/git-clone-step-issue.md
new file mode 100644
index 000000000..c24160f22
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/git-clone-step-issue.md
@@ -0,0 +1,35 @@
+---
+title: "Clone step failed: Command [git checkout $REVISION] exited with code [1]"
+description: ""
+group: troubleshooting
+sub_group: common-issues
+permalink: /:collection/troubleshooting/common-issues/git-clone-step-issue/
+redirect_from:
+  - /docs/build-step-no-such-file-or-directory/
+toc: true
+---
+
+## Issue
+
+Error message:
+
+`Clone step failed: Command [git checkout $REVISION] exited with code [1]`
+
+## Possible cause
+The Git clone step may fail if your repository has CRLF (**Windows**) end-of-line characters instead of LF (**Unix**).
+
+ `Text`
+{% highlight text %}
+Aborting
+Command [git checkout $REVISION] exited with code [1]
+ [SYSTEM] Error: Failed to run git-clone step: Cloning main repository...; caused by NonZeroExitCodeError
+ : Container for step title: Cloning main repository..., step type: git-clone, operation: Cloning reposit
+ ory failed with exit code: 1
+{% endhighlight %}
+
+## Solution
+
+Create a `.gitattributes` file in your repository (for all branches) that forces Git to commit files with LF line endings.
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
\ No newline at end of file
diff --git a/_docs/troubleshooting/common-issues/handling-commit-messages-with-a-quote-character.md b/_docs/troubleshooting/common-issues/handling-commit-messages-with-a-quote-character.md
new file mode 100644
index 000000000..9ce3e36e7
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/handling-commit-messages-with-a-quote-character.md
@@ -0,0 +1,62 @@
+---
+title: "Handling commit messages with a quote character"
+description: ""
+group: troubleshooting
+sub_group: common-issues
+permalink: /:collection/troubleshooting/common-issues/handling-commit-messages-with-quotes/
+redirect_from:
+  - /docs/handling-commit-messages-with-quotes/
+toc: true
+---
+## Issue
+
+Error on commit:
+`Error parsing YAML file: can not read a block mapping entry; a multiline key may not be an implicit key at line 13, column 30`
+
+## Possible cause
+
+A commit message that determines the logical flow of a pipeline includes quotes as part of the message.
+One example of using a commit message to decide the flow of a pipeline is to skip continuous integration if the commit message contains `"--skip-ci"`. Since the commit message contains a quote character, it can result in the error.
+
+Example:
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  build_step:
+    type: build
+    image_name: codefreshio/yaml-example-unit-test-compose
+    dockerfile: Dockerfile
+    tag: ${{CF_BRANCH}}
+    when:
+      condition:
+        all:
+          noSkipCiInCommitMessage: 'includes(lower("${{CF_COMMIT_MESSAGE}}"), "--skip-ci") == false'
+{% endraw %}
+{% endhighlight %}
+
+This is a string-quoting issue. The commit message contains a quote character, and the YAML file itself also uses quote characters to denote strings. This breaks the YAML file.
+
+
+## Solution
+
+Use a multiline string.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  build_step:
+    type: build
+    image_name: codefreshio/yaml-example-unit-test-compose
+    dockerfile: Dockerfile
+    tag: ${{CF_BRANCH}}
+    when:
+      condition:
+        all:
+          noSkipCiInCommitMessage: |
+            includes(lower("${{CF_COMMIT_MESSAGE}}"), "--skip-ci") == false
+{% endraw %}
+{% endhighlight %}
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
\ No newline at end of file
diff --git a/_docs/troubleshooting/common-issues/multi-git-triggers.md b/_docs/troubleshooting/common-issues/multi-git-triggers.md
new file mode 100644
index 000000000..9210d2d3e
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/multi-git-triggers.md
@@ -0,0 +1,117 @@
+---
+title: "Using multi-git triggers"
+description: "Pinning codefresh.yml to a specific branch"
+group: troubleshooting
+sub_group: common-issues
+toc: true
+---
+
+Codefresh has the capability to store the [pipeline definition]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#using-a-codefreshyml-for-the-source-code-repository) in the same Git repository as the source code.
+
+
+{% include
+image.html
+lightbox="true"
+file="/images/troubleshooting/from-repo.png"
+url="/images/troubleshooting/from-repo.png"
+alt="Storing the pipeline in the repository"
+caption="Storing the pipeline in the repository"
+max-width="50%"
+%}
+
+
+
+By default, when a Git trigger fires, Codefresh fetches the `codefresh.yml` file from the branch that is mentioned in the webhook. This is the behavior you expect most of the time, as it allows you to version your pipelines and have different versions for different branches.
+
+## Overriding the branch of codefresh.yml
+
+Sometimes, however, you want a pipeline to be triggered by another Git repository (other than the main one). An example would be:
+
+1. Repository A contains a deployment pipeline with associated `codefresh.yml`.
+1. Repository B is creating binary artifacts that are deployed by pipeline A.
+
+In those cases, Codefresh supports adding [multiple git triggers]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) on the same pipeline. This way, pipeline A is triggered by commits to both repository A and repository B. Notice, however, that the `codefresh.yml` file used is still fetched from the branch mentioned in the webhook.
+
+This creates issues with pipeline definitions because repository B might not have a `codefresh.yml` at all, or it might have the wrong one in the branch that actually created the webhook. Another bad scenario is when the branch mentioned in the webhook from repository B does not even exist in repository A.
+
+To solve this issue, you can pin down the branch that will be used as the source of `codefresh.yml`. In the example above, you can specify that no matter which branch of repository B triggered the build, the pipeline should only use the `master` branch of repository A, regardless of what is mentioned in the webhook.
+
+To perform this pinning, you need to use the [Codefresh CLI](https://codefresh-io.github.io/cli/installation/) and [set up authentication](https://codefresh-io.github.io/cli/getting-started/) with your Codefresh account.
+
+Once this is done, check that your account is locally accessible by running:
+
+```
+codefresh get pipelines
+```
+
+You should see a long list of your pipelines in the terminal output.
+
+Export the pipeline that needs to have its `codefresh.yml` pinned (pipeline A in the example above):
+
+```
+codefresh get pipelines kostis-codefresh/trivial-go-web/from-repo --output=yaml > custom-spec.yaml
+```
+
+Open the `custom-spec.yaml` file with a text editor and locate the `specTemplate` block. Then add a new `revision` property with the branch that contains the `codefresh.yml` you always want to be used (repository A in our example above).
+
+
+{% highlight yaml %}
+{% raw %}
+spec:
+  triggers: []
+  contexts: []
+  variables:
+    - key: PORT
+      value: '8080'
+  specTemplate:
+    location: git
+    repo: kostis-codefresh/trivial-go-web
+    path: codefresh.yml
+    revision: master
+  steps: {}
+  stages: []
+{% endraw %}
+{% endhighlight %}
+
+In the example above we specified the `master` branch. Now, even if the webhook from repository B mentions another branch (e.g. `develop`), Codefresh will still use the `master` branch of repository A.
+
+To apply your changes, replace the pipeline in Codefresh from your local copy:
+
+```
+codefresh replace pipelines kostis-codefresh/trivial-go-web/from-repo -f custom-spec.yaml
+```
+
+You should get a message that your pipeline is updated. This concludes the setup of the pipeline specification.
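+
+If you want to double-check that the change was applied, you can export the pipeline again and look for the `revision` field. A quick way to do this with the same example pipeline:
+
+```
+codefresh get pipelines kostis-codefresh/trivial-go-web/from-repo --output=yaml | grep 'revision:'
+```
+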
+Now you also need to override the clone step of the pipeline itself, as explained in the next section.
+
+## Overriding the implicit clone step *Deprecated*
+
+*Deprecated*: The information below is only relevant if you are using our old Personal Git Provider system, and not our current one. Therefore, you should only use this for reference when working on old pipelines.
+
+All pipelines in Codefresh that are connected to a git repository have an automatic git clone step defined for them.
+This clone step will also fetch the code from the branch mentioned in the webhook.
+
+To override this default behavior as well and force a specific branch, you can use a [custom clone step]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) like this:
+
+
+{% highlight yaml %}
+{% raw %}
+main_clone:
+  type: git-clone
+  title: Checking out git repository
+  repo: ${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}
+  git: github
+  revision: master
+{% endraw %}
+{% endhighlight %}
+
+In the example above, we have forced the Git checkout to happen from the `master` branch, regardless of the branch mentioned in the webhook.
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
+[Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/)
+[Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/)
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+
+
+
+
diff --git a/_docs/troubleshooting/common-issues/no-dockerfile-found.md b/_docs/troubleshooting/common-issues/no-dockerfile-found.md
new file mode 100644
index 000000000..3758f98c8
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/no-dockerfile-found.md
@@ -0,0 +1,89 @@
+---
+title: "No Dockerfile found"
+description: "Failed to fetch the Dockerfile from path"
+group: troubleshooting
+sub_group: common-issues
+redirect_from:
+  - /docs/no-dockerfile-found/
+toc: true
+---
+
+## Issue
+[Build step]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) in your pipeline fails with the error message:
+
+"Repository does not contain a Dockerfile. Please check the pipeline configuration"
+OR
+"Failed to fetch the Dockerfile from path"
+
+## Possible cause
+
+This issue occurs when you are trying to build a Docker image and the pipeline step cannot find a Dockerfile.
+It might be helpful to include a dummy step in your pipeline that prints all files in the workspace. This way you can verify what files are available to the pipeline.
+
+`pipeline step`
+{% highlight yaml %}
+{% raw %}
+print_pwd_files:
+  title: 'Listing files'
+  image: alpine:latest
+  commands:
+    - 'ls -l'
+{% endraw %}
+{% endhighlight %}
+
+## Solution
+
+There are two ways to address this error:
+
+### Include a clone step named `main_clone`
+First, make sure that you have at least one [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) in your pipeline, named `main_clone`.
+This way, the current working folder is automatically set to the project folder of the cloned Git repository.
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  main_clone:
+    title: 'Cloning main repository...'
+    type: git-clone
+    repo: kostis-codefresh/example_nodejs_postgres
+    revision: master
+    git: github
+  myDockerImage:
+    title: 'Building My Docker Image'
+    type: build
+    dockerfile: Dockerfile
+    image_name: my-app-image
+    tag: from-master-branch
+{% endraw %}
+{% endhighlight %}
+
+### Verify target directory of build step
+Secondly, if you check out multiple Git repositories or use a different name in your Git clone step, make sure that the build step looks at the correct directory:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  checkoutApp:
+    title: 'Cloning a repository...'
+    type: git-clone
+    repo: kostis-codefresh/trivial-go-web
+    revision: master
+    git: github
+  myDockerImage:
+    title: 'Building Docker Image'
+    type: build
+    dockerfile: Dockerfile
+    working_directory: './trivial-go-web'
+    image_name: my-app-image
+    tag: from-master-branch
+{% endraw %}
+{% endhighlight %}
+
+Notice that the `working_directory` property of the build step searches for the Dockerfile in the folder named `trivial-go-web`, instead of the root folder of the pipeline workspace.
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
diff --git a/_docs/troubleshooting/common-issues/non-admin-users-support.md b/_docs/troubleshooting/common-issues/non-admin-users-support.md
deleted file mode 100644
index 91f8e6481..000000000
--- a/_docs/troubleshooting/common-issues/non-admin-users-support.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: "Non admin users can are thrown out back to classic Codefresh"
-description: ""
-group: troubleshooting
-sub_group: common-issues
-toc: true
----
-
-For the time being only admin users of your account will be able to use Codefresh Argo Platform.
-
-The reason for this is that the roles and permissions model has not yet been released and thus all users are able to view all entities.
-
-The roles and permissions model will be released before going into GA.
diff --git a/_docs/troubleshooting/common-issues/paging-issues-builds-images.md b/_docs/troubleshooting/common-issues/paging-issues-builds-images.md
new file mode 100644
index 000000000..608bd4c4c
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/paging-issues-builds-images.md
@@ -0,0 +1,105 @@
+---
+title: "Paging issues for builds and images"
+description: "API and CLI operations for paging results do not work"
+group: troubleshooting
+sub_group: common-issues
+toc: true
+---
+
+On January 9th 2021 the API used for retrieving builds and images in Codefresh will be revamped with a new implementation
+that is far more efficient than the previous one.
+
+In summary, the paging mechanism will change: instead of being page-based, it will be cursor-based. The end result is a much faster implementation, but it also has side effects
+on how you can move among results in the returned list.
+
+## Problem description
+
+In the old implementation you could fetch any possible page within the results, and each call was completely independent of the previous one. For example, you could do
+
+1. `GET /workflow?page=5`
+1. `GET /workflow?page=2`
+1. `GET /workflow?page=8`
+
+or
+
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=4`
+1. `GET /workflow?page=2`
+
+The same thing was true for both [builds](https://codefresh-io.github.io/cli/builds/get-build/) and [image listing](https://codefresh-io.github.io/cli/images/get-image/).
+
+This method will **NO** longer work after January 9th 2021.
+The reason is that with the new implementation there is a database cursor behind the scenes that
+tracks the current position within the result list. You can only go back and forward to the next or previous page, but never jump to an arbitrary page.
+
+You need to check your Codefresh custom integrations that use the CLI or the [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/) to see if you have scenarios
+where you are requesting pages in an out-of-order manner.
+
+After January 9th 2021, both of the examples shown above will become **invalid operations**.
+
+## The solution
+
+If you have cases where your custom integration uses the Codefresh API and CLI to list images and/or builds with arbitrary page numbers, you need to change them and
+make them sequential. The only exception to this rule is that you can always go back to page 1 (resetting the cursor to the first position).
+
+Getting pages in order:
+
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=2`
+1. `GET /workflow?page=3`
+1. `GET /workflow?page=4`
+
+Moving to the next and previous page:
+
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=2`
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=2`
+1. `GET /workflow?page=3`
+
+Going back to page 1:
+
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=2`
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=4`
+1. `GET /workflow?page=1`
+
+All of the examples shown above are valid with the new paging implementation.
+
+## Handling concurrent API connections that list images and/or builds
+
+By default, you can have only one concurrent CLI/API connection for fetching lists of builds/images. If you use multiple connections
+they will all have the same cursor, and using them all at once will yield undefined results.
+
+To overcome this, you can use the `X-Pagination-Session-Id` header in your API calls and pass any value you see fit that makes your connection unique.
+
+Note that if you use the [Codefresh CLI in a Codefresh pipeline]({{site.baseurl}}/docs/integrations/codefresh-api/#using-codefresh-from-within-codefresh), the session id is automatically set for you with a value of `{workflowId} + {stepsContainerId}`, meaning that you can use multiple steps with the CLI in a single pipeline without any race conditions.
+
+## Advantages of the new mechanism
+
+Apart from increased performance, the new implementation also allows you to use negative numbers for going to the "previous" page. This is very handy for querying existing builds
+while several new builds are becoming active (and thus being added to the list in real time).
+
+This new scenario is also possible with the new implementation:
+
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=2`
+1. `GET /workflow?page=1`
+1. `GET /workflow?page=0`
+1. `GET /workflow?page=-1` (get the previous page that contains brand new builds)
+
+The method will work in both the Codefresh API and the CLI.
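+
+If you script these calls yourself, the sketch below shows what sequential paging with a dedicated session id could look like. It assumes the standard `https://g.codefresh.io/api` base URL and an API key stored in a `CF_API_KEY` environment variable; adjust both to your own setup:
+
+```
+# Walk the build list page by page, keeping the same cursor via a unique session id
+for page in 1 2 3; do
+  curl -s \
+    -H "Authorization: ${CF_API_KEY}" \
+    -H "X-Pagination-Session-Id: my-nightly-report" \
+    "https://g.codefresh.io/api/workflow?page=${page}"
+done
+```
+
+The same session id should be reused for every page of the same listing; a different consumer should simply pick a different value.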
+
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
+
+
+
+
+
+
+
+
+
diff --git a/_docs/troubleshooting/common-issues/restoring-data-from-pre-existing-image-hangs-on.md b/_docs/troubleshooting/common-issues/restoring-data-from-pre-existing-image-hangs-on.md
new file mode 100644
index 000000000..3db160f39
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/restoring-data-from-pre-existing-image-hangs-on.md
@@ -0,0 +1,51 @@
+---
+title: "Hangs on restoring data from pre-existing image"
+description: ""
+group: troubleshooting
+sub_group: common-issues
+redirect_from:
+  - /docs/restoring-data-from-pre-existing-image-hangs-on/
+toc: true
+---
+
+## Issue
+
+The 'Restoring data from pre-existing image' step seems to hang when starting a new build for the pipeline.
+
+## Possible cause
+
+This issue might occur when your volume size is large.
+
+Codefresh has the unique capability of working on a single file system between different builds, by provisioning a volume per pipeline and restoring it upon starting a new build.
+
+If the first step seems to hang for a significant amount of time when restoring the data, this might suggest that your volume size is very big.
+
+{% include
+image.html
+lightbox="true"
+file="/images/troubleshooting/restore-data-hangs-example.png"
+url="/images/troubleshooting/restore-data-hangs-example.png"
+alt="First build step hangs when starting new build - example"
+caption="First build step hangs when starting new build - example"
+max-width="60%"
+%}
+
+## Solution
+Delete that volume and start from scratch.
+* In the build wizard pop-up, from **Advanced Options**, select **Reset pipeline volume**.
+
+{% include
+image.html
+lightbox="true"
+file="/images/troubleshooting/reset-volume-for-hanging-step.png"
+url="/images/troubleshooting/reset-volume-for-hanging-step.png"
+alt="Reset volume for pipeline build"
+caption="Reset volume for pipeline build"
+max-width="60%"
+%}
+
+>Resetting the pipeline volume may cause your build to take longer than usual.
+
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
\ No newline at end of file
diff --git a/_docs/troubleshooting/common-issues/the-docker-image-does-not-exist-or-no-pull-access.md b/_docs/troubleshooting/common-issues/the-docker-image-does-not-exist-or-no-pull-access.md
new file mode 100644
index 000000000..cef686170
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/the-docker-image-does-not-exist-or-no-pull-access.md
@@ -0,0 +1,32 @@
+---
+title: "Docker image does not exist or no pull access"
+description: ""
+group: troubleshooting
+sub_group: common-issues
+permalink: /:collection/troubleshooting/common-issues/the-docker-image-does-not-exist-or-no-pull-access/
+redirect_from:
+  - /docs/the-docker-image-does-not-exist-or-no-pull-access/
+toc: true
+---
+
+## Issue
+The workflow fails with an error message similar to:
+
+    [SYSTEM] Error: Failed to pull base image: OWNER/REPO:TAG; caused by Error: (HTTP code 404) no such image - no such image: OWNER/REPO:TAG: No such image: codefreshdemo/demochat:feature_test
+
+{% include
+image.html
+lightbox="true"
+file="/images/troubleshooting/codefresh_image_not_found.png"
+url="/images/troubleshooting/codefresh_image_not_found.png"
+alt="Codefresh image not found error"
+caption="Codefresh image not found error"
+max-width="60%"
+%}
+
+## Solution
+1. If this docker image was created in Codefresh and hasn't been pushed to docker registry.
Go to the tab `Images` and check the tag and name of this image. +2. If this docker image was pushed to docker registry. Go to the Integration page and check that you integrated with this docker registry. + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) \ No newline at end of file diff --git a/_docs/troubleshooting/common-issues/using-ssh-keys.md b/_docs/troubleshooting/common-issues/using-ssh-keys.md new file mode 100644 index 000000000..5760cf862 --- /dev/null +++ b/_docs/troubleshooting/common-issues/using-ssh-keys.md @@ -0,0 +1,46 @@ +--- +title: "How to use SSH keys in freestyle steps" +description: "Running commands remotely from Codefresh Pipeline" +group: troubleshooting +sub_group: common-issues +toc: true +--- + +You can easily connect to external servers in Codefresh pipelines and run commands with them via SSH. + +First, you need to create or find a Docker image with the SSH client. A good choice is [https://hub.docker.com/r/praqma/network-multitool](https://hub.docker.com/r/praqma/network-multitool) as it has several other networking tools inside. + +Then create a freestyle step in your pipeline like this: + + +{% highlight yaml %} +{% raw %} + ssh: + title: "Executing command over SSH" + type: "freestyle" + image: "praqma/network-multitool" + commands: + - mkdir /root/.ssh + - echo ${{SSH_KEY}} | base64 -d > /root/.ssh/id_rsa ## Value of ${{SSH_KEY}} is base64 encoded + - chmod 600 ~/.ssh/id_rsa + - eval $(ssh-agent -s) + - ssh-add ~/.ssh/id_rsa + - ssh -o "StrictHostKeyChecking no" ${{MY_USER}}@${{MY_HOST}} + - ssh ${{MY_USER}}@${{MY_HOST}} 'uptime' +{% endraw %} +{% endhighlight %} + + +The pipeline expects some [variables]({{site.baseurl}}/docs/codefresh-yaml/variables/) called `SSH_KEY, MY_USER, MY_HOST` that you can enter directly in the pipeline or fetch from [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/). + +Replace `uptime`, with your own command that you want executed on the remote host. + +## What to read next + +* [How to use SSH keys with GIT]({{site.baseurl}}/docs/integrations/git-providers/#ssh-keys) +* [How to deploy with SCP]({{site.baseurl}}/docs/yaml-examples/examples/deploy-to-tomcat-via-scp/) +* [How to deploy with FTP]({{site.baseurl}}/docs/yaml-examples/examples/transferring-php-ftp/) + + + + diff --git a/_docs/troubleshooting/common-issues/validation-port-warnings.md b/_docs/troubleshooting/common-issues/validation-port-warnings.md new file mode 100644 index 000000000..c608adde3 --- /dev/null +++ b/_docs/troubleshooting/common-issues/validation-port-warnings.md @@ -0,0 +1,58 @@ +--- +title: "Validation port warnings" +description: "" +group: troubleshooting +sub_group: common-issues +redirect_from: + - /docs/validation-port-warnings/ +toc: true +--- + +## Issue + +Warning regarding the validation port on trying to launch a service or composition. Codefresh uses the validation port to verify that your application is up. +Launching a pipeline showed the following warning messages in the build log: + + * “Encountered a problem while validating your application. Please check your pipeline configuration.” + * “No default port was configured”. 
+
+{% include
+image.html
+lightbox="true"
+file="/images/troubleshooting/validation-port-warning.png"
+url="/images/troubleshooting/validation-port-warning.png"
+alt="Validation port warning"
+caption="Validation port warning"
+max-width="40%"
+%}
+
+
+
+## Possible cause
+
+Though the pipeline is launched successfully, the Codefresh **health check** couldn't identify which URL to validate, or didn't get a response from the specified application port.
+
+When launching a pipeline, Codefresh exposes the ports specified in the repository's Dockerfile. Codefresh enables you to validate that your application is up by running a '**health check**' to test the connection to one of the exposed ports.
+
+## Solution
+
+{:start="1"}
+1. Make sure that you listen to one of your exposed ports:
+   * If you use a Dockerfile, listen to a port in your application.
+   * If you use a template, listen to one of the exposed ports in either the template, or in the ports list. (If you are not using a Dockerfile, you can find the ports list in the **Pipeline** view.)
+
+{:start="2"}
+2. Navigate to **Repositories > Your Repository > Launch Settings**, and verify that the **Application Port** is the same one that your application exposes and listens to. This is the port on which Codefresh runs the **health check**.
+
+{% include
+image.html
+lightbox="true"
+file="/images/troubleshooting/port-for-health-check.png"
+url="/images/troubleshooting/port-for-health-check.png"
+alt="Port for health check"
+caption="Port for health check"
+max-width="40%"
+%}
+
+## Related articles
+[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues)
\ No newline at end of file
diff --git a/_docs/troubleshooting/common-issues/workflow-terminated-by-system.md b/_docs/troubleshooting/common-issues/workflow-terminated-by-system.md
new file mode 100644
index 000000000..39bea3c22
--- /dev/null
+++ b/_docs/troubleshooting/common-issues/workflow-terminated-by-system.md
@@ -0,0 +1,51 @@
+---
+title: "Workflow process terminated by the system"
+description: ""
+group: troubleshooting
+sub_group: common-issues
+permalink: /:collection/troubleshooting/common-issues/workflow-terminated-by-system/
+redirect_from:
+  - /docs/workflow-terminated-by-system/
+toc: true
+---
+
+## Issue
+
+Workflow process terminated by the system
+
+
+## Possible cause
+If you have a long workflow that takes more than several minutes to finish, it can be automatically
+terminated by the system. This happens when there isn't any update in the build process logs for more than 45 minutes.
+
+By default, the system terminates a build step that seems "inactive" after 45 minutes. If your step does not print anything in the log for 45 minutes, it is considered inactive.
+
+
+
+## Solution
+
+You can choose one of the following suggestions to resolve it:
+
+- Review your Dockerfile and the steps of your workflow to decrease the time needed to execute the workflow.
+- Move to the dedicated infrastructure configuration (Codefresh on-premises version).
+- Add an additional log to the process (see below for an example).
+- Contact us to talk about startup pricing.
+
+{{site.data.callout.callout_warning}}
+If your workflow process has been terminated by the system for another reason that is not listed here, please let us know.
+{{site.data.callout.end}} + +As an example, if you have a long running process that takes more than 45 minutes you can try adding +a command that echoes something every 10 minutes to STDOUT before your command, like this: + +``` +SlowFreeStyleStep: + title: Performing a slow process + image: my-docker-image:1.0 + commands: + - /bin/bash -c "while true; do echo Keep alive... && sleep 600; done &" + - ./my-really-slow-script.sh +``` + +## Related articles +[Troubleshooting common issues]({{site.baseurl}}/docs/troubleshooting/common-issues) \ No newline at end of file diff --git a/_docs/troubleshooting/personal-git-deprecation.md b/_docs/troubleshooting/personal-git-deprecation.md new file mode 100644 index 000000000..d61c29638 --- /dev/null +++ b/_docs/troubleshooting/personal-git-deprecation.md @@ -0,0 +1,43 @@ +--- +title: "Deprecation of personal Git integrations" +description: "Moving Git integrations to accounts instead of individual users" +group: troubleshooting +toc: true +--- + + +Codefresh supports all major Git providers, GitHub, GitLab, Bitbucket. Through [Git integrations]({{site.baseurl}}/docs/integrations/git-providers/) you can connect to the respective Git provider. Signing up with Codefresh typically requires you to use a Git provider for your basic information (and in the past, an automatic integration was created with the Git provider that you used during initial sign-up). + + +At Codefresh, a single user can belong to multiple *Accounts* (think GitHub organizations). Typically, a Codefresh user represents a single person, while an *Account* represents a company or team. + +Until July 2019, Codefresh allowed you to create a Git integration either at the account level or at the user level. This has been problematic with several customer scenarios. + +## The problem of personal Git integrations + +While on paper the flexibility of adding Git integrations in different levels (account or user) looks good, in practice it has been very confusing for users/customers + + * Codefresh introduced login with providers which aren’t Git based - Google, [LDAP]({{site.baseurl}}/docs/enterprise/single-sign-on/sso-ldap/), [SAML]({{site.baseurl}}/docs/enterprise/single-sign-on/sso-setup-saml2/). This was causing problems for accounts which started creating pipelines leveraging the personal git context of each user and having new users signing up without a Git provider + * Codefresh required Git permissions from each user that signed up instead of allowing to create a dedicated integration to your git provider of choice, allowing a company to better manage codefresh access control to their Git organization + +These problems were exacerbated with the introduction of [explicit clone steps]({{site.baseurl}}/docs/troubleshooting/git-step-migration/). + +To this end, personal Git providers (i.e. Git integration tied to a person instead of an *account*/company) were deprecated in July 2019. This change makes Git integrations much more consistent, especially for people that work within a company organization. + + +## How to migrate your Git integration and move away from a personal Git provider + +>Note that if you created a Codefresh account in July 2019 and later, there is nothing to do. Your user doesn't have a personal git provider any more. No action is needed on your part. 
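+
+The migration steps below ask every pipeline to declare an explicit clone step that points to a named, account-level Git integration. As a reference, a minimal sketch of such a step is shown here; the integration name `github` is an assumption and should match an integration defined in your own providers page:
+
+{% highlight yaml %}
+{% raw %}
+main_clone:
+  title: Checking out git repository
+  type: git-clone
+  repo: ${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}
+  revision: ${{CF_REVISION}}
+  git: github # name of an account-level Git integration, NOT the deprecated CF-default
+{% endraw %}
+{% endhighlight %}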
+ +Make sure that you have at least one Git integration defined in your account (if until now you only used the implicit one that was created when you signed-up with Codefresh) + +Then, for all existing [pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/): + +* make sure that they have an [explicit git clone step]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) +* the git clone step has an explicit `git` property that defines the provider (do not leave empty this property ) +* the `git` provider has an actual value and **not** `CF-default`. This value used to represent the personal git provider of a user and is now deprecated. Use the name of an actual git integration as is defined in the [providers page]({{site.baseurl}}/docs/integrations/git-providers/) + +Once that is done, contact Codefresh via [http://support.codefresh.io](http://support.codefresh.io) or email, and we can take care of automatic migration of all your existing [triggers]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) that exist in pipelines that are still using a personal Git provider. + + + diff --git a/_docs/troubleshooting/runtime-issues.md b/_docs/troubleshooting/runtime-issues.md index 31b2a2c04..9db9c89b6 100644 --- a/_docs/troubleshooting/runtime-issues.md +++ b/_docs/troubleshooting/runtime-issues.md @@ -1,5 +1,5 @@ --- -title: "Runtimes" +title: "GitOps runtime issues" description: "" group: troubleshooting toc: true diff --git a/_docs/whats-new/whats-new.md b/_docs/whats-new/whats-new.md deleted file mode 100644 index aa22dce07..000000000 --- a/_docs/whats-new/whats-new.md +++ /dev/null @@ -1,1347 +0,0 @@ ---- -title: "What's new in Codefresh?" -description: "" -group: whats-new -redirect_from: - - /docs/whats-new/ -toc: true ---- - -Built on Argo, the world’s most popular and fastest-growing open source software delivery, Codefresh unlocks the full enterprise potential of Argo Workflows, Argo CD, Argo Events, and Argo Rollouts, providing a control-plane for managing them at scale. - - - -## October 2022 - -### Features & enhancements -{::nomarkdown} -
                -{:/} - -#### Kubernetes version runtime support -We now support Kubernetes server versions 1.21 and higher. - -{::nomarkdown} -
                -{:/} - -#### Request Routing Service for runtimes -We have changed the routing mechanism for hybrid runtimes. URL requests and webhooks are now routed through a new internal routing service instead of through the ingress controller. - -The change is effective from runtime version 0.0.543 and higher. If you already have runtimes installed, this change does not require any action from you, both to upgrade to the new runtime version or retain existing runtimes. Older runtimes continue to use the ingress controller for routing purposes. - -See [Hybrid runtime architecture]({{site.baseurl}}/docs/getting-started/architecture/#hybrid-runtime-architecture) and [Request Routing Service]({{site.baseurl}}/docs/getting-started/architecture/#request-routing-service). - -{::nomarkdown} -
                -{:/} - -#### More Git providers for runtimes -Codefresh runtimes now support GitHub Enterprise, GitLab, and Bitbucket as Git providers, apart from GitHub, which is the default. - -When installing the first hybrid or hosted runtime for your account, you can define the Git provider of choice. Because Codefresh creates a configuration repository that is shared with subsequent runtimes in the same account, you cannot change the Git provider for a different runtime in the same account. - -Each Git provider requires runtime tokens with specific scopes and has specific installation requirements. Once installed, you can authorize access to the Git provider through OAuth or a personal access token. - -Note that GitLab cloud is not supported for hosted runtimes. - -See [Git provider and repo flags]({{site.baseurl}}/docs/runtime/installation/#git-provider-and-repo-flags). - -{::nomarkdown} -
                -{:/} - -#### Turn off notifications for runtimes -Codefresh alerts you to runtimes that are insecure or have invalid or expired Git personal access tokens. You can turn off these notifications selectively for runtimes for which these alerts are less critical, such as non-production runtimes. - -The option is user-specific, and applies only to runtimes in the user's account. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-dismiss-runtime-notifications.png" - url="/images/whats-new/rel-notes-oct22-dismiss-runtime-notifications.png" - alt="Turn off notifications for selected runtime" - caption="Turn off notifications for selected runtime" - max-width="80%" -%} - -Runtimes with disabled notifications are prefixed with an icon as in the picture below. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-dimiss-notifications-indication.png" - url="/images/whats-new/rel-notes-oct22-dimiss-notifications-indication.png" - alt="Runtime with disabled notifications" - caption="Runtime with disabled notifications" - max-width="80%" -%} - -{::nomarkdown} -
                -{:/} - -#### Rollout Player for deployments -Managing ongoing rollouts during a deployment is now simple with the Rollout Player. Clicking the rollout name in Timeline > Updated Services, displays both the visualization of the steps in the rollout and the Rollout Player. With the Rollout Player you can control individual steps in an ongoing rollout and even promote the rollout to a release. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct-22-rollout-player.png" - url="/images/whats-new/rel-notes-oct-22-rollout-player.png" - alt="Rollout Player" - caption="Rollout Player" - max-width="40%" -%} - - -The Rollput Player allows you to: -* Resume an indefinitley paused step -* Forward a step by skipping its execution -* Promote the rollout to deployment by skipping remaining pause, analysis - -{::nomarkdown} -
                -{:/} - -#### Context menu for application resources -We have enhanced the functionality for application resources in the Current State tab with the context menu for resources. The options available differ according to the type of resource. - - - -**On-demand sync for individual application resources** -Sync is a context menu option available for all resources that track sync status. You can sync individual resources as needed or when out-of-sync without synchronizing or refreshing the application. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct-22-sync-context-menu.png" - url="/images/whats-new/rel-notes-oct-22-sync-context-menu.png" - alt="Sync option in resource context menu" - caption="Sync option in resource context menu" - max-width="50%" -%} - -**Rollout resource actions** -The context menu for `rollout` resource types have actions to control the rollout. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-rollout-context-menu.png" - url="/images/whats-new/rel-notes-oct22-rollout-context-menu.png" - alt="Context menu options for Rollout resource" - caption="Context menu options for Rollout resource" - max-width="50%" -%} - -{::nomarkdown} -
                -{:/} - -#### Other enhancements - -**Git Sources as Application Type filter** -The list of filters for Application Type in the Applications dashboard includes the Git Source filter. Filtering by Git Source shows `Git Source Apps` which are applications created by Codefresh that store definitions of Argo Project resources. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-app-type-git-source.png" - url="/images/whats-new/rel-notes-oct22-app-type-git-source.png" - alt="Git Source as Application Type filter" - caption="Git Source as Application Type filter" - max-width="40%" -%} - - -**Manifests for Analysis Runs** -Analysis Run now shows the manifest in addition to the run results. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-analysis-run-manifest.png" - url="/images/whats-new/rel-notes-oct22-analysis-run-manifest.png" - alt="Manifest for Analysis Run" - caption="Manifest for Analysis Run" - max-width="40%" -%} - -{::nomarkdown} -
                -{:/} - -### Bug fixes - -{::nomarkdown} -
                -{:/} - -**Runtimes** - -* 500: Internal Server Error when adding cluster command to hosted runtime. -* Commit SHA link in Activity Log goes to the Home page instead of to the Commit URL for the Git provider. -* Ingress controller errors for cluster even when `skip-ingress` flag is defined. -* Retry mechanism requests cause delay in Git integration checks. -* For hosted runtimes, Git Source is not displayed though the Connect to Git provider step is marked as complete. -* No option to log out on selecting invalid authentication mode. -* Removing a managed cluster does not display any indication in Codefresh UI. -* Up-to-date runtimes display upgrade indication. - - -{::nomarkdown} -
                -{:/} - -**Applications** -* Applications deleted in Git displayed as errors, or as Missing in Codefresh. -* Tagging/untagging favorite application breaks relationship to parent application. -* Application definitions validation for cluster URL that does not exist shows wrong entity type. -* Incorrect number of replicas for previous image in Applications dashboard. -* Mismatch between information reported for cluster and namespace in Applications dashboard and Images. -* Source link in Timeline tab redirects to incorrect branch. -* Missing Health indication for Argo Rollouts in Codefresh UI. - -{::nomarkdown} -
                -{:/} - -**Delivery Pipelines and workflows** -* 100% CPU consumption for workflows with more than 20 nodes. -* Discard Changes button enabled when there are no changes. - - -## September 2022 - - - -### Features & enhancements -{::nomarkdown} -
                -{:/} - -#### Enriched application header -Every application includes a header that highlights key information and links to key aspects of the application. For example, you can see both the current sync state and the result of the previous sync operation, with links to pull-out panels including additional details. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-app-header.png" - url="/images/whats-new/rel-notes-sep22-app-header.png" - alt="Application header for selected appplication" - caption="Application header for selected appplication" - max-width="80%" -%} - -#### Refresh and hard refresh to manage applications -Just as you can manually synchronize applications directly in Codefresh, you can now perform Refresh and Hard Refresh for applications. -In the Applications dashboard, both options are available in the context menu of each application. On selecting an application, you can see them on the top-right next to the Synchronize button. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-refresh-hardrefresh.png" - url="/images/whats-new/rel-notes-sep22-refresh-hardrefresh.png" - alt="Refresh/Hard refresh options for selected application" - caption="Refresh/Hard refresh options for selected application" - max-width="80%" -%} - - - -#### Click resume indefinitely paused rollouts -Argo Rollouts allows you to pause a rollout indefinitely and resume it manually instead of automatically after a fixed duration. Manually resuming a rollout is generally done through the CLI. -Codefresh provides you the option of resuming an indefinitely paused rollout directly from the Applications dashboard in Codefresh, with a single click. - -In the Timelines tab of the selected application, an ongoing rollout that is indefinitely paused displays the pause button. Resuming the rollout is as simple as clicking the pause button. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-resume-pause.png" - url="/images/whats-new/rel-notes-sep22-resume-pause.png" - alt="Resume indefinitley paused rollout" - caption="Resume indefinitley paused rollout" - max-width="60%" -%} - -#### Custom path for application resources -When creating applications, in addition to changing the name of the manifest, you can now also define the path for the manifest within the Git Source. Use the front slash (/) to add subfolders to the path. The resource is created in the Git Source you select, according to the path you defined. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-app-yaml-location.png" - url="/images/whats-new/rel-notes-sep22-app-yaml-location.png" - alt="Define location for application YAML" - caption="Define location for application YAML" - max-width="60%" -%} - - - -#### Events tab for applications -In the previous month's release, we added the Events panel displaying successful and events for the application. -For more visibility and easier access, the same Events tab is now displayed with the Current State, Timeline, Services, and Configuration tabs for the selected application. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-events-tab.png" - url="/images/whats-new/rel-notes-sep22-events-tab.png" - alt="Events tab for application" - caption="Events tab for application" - max-width="60%" -%} - - -### Bug fixes - -{::nomarkdown} -
                -{:/} - -**Runtimes** -* Incorrect status for Hosted runtime when app-proxy is unreachable. -* Git provider not registered for hosted runtimes with Git Sources defined in the shared configuration repo. -* Authentication failure between platform and app proxy. -* Adding cluster to a runtime shows an error even when the cluster is added to the runtime. -* Duplicate dates in Activity Log notifications. -* Argo CD fails to connect to K8s 1.24 clusters. -* After uninstalling a runtime, argo-rollouts and rollout-reporter files remain for managed cluster remain in shared configuration repo. -* Deleted managed cluster shows as Unknown. - -{::nomarkdown} -
                -{:/} - -**Applications** -* Health status does not change to progressing when previously degraded. -* Wrong git source reference -* Git Source applications in the Applications dashboard not reflected in the Runtimes > Git Source tab. -* Switching from YAML to form view after changing fields does not update validations. -* App details drawer crashes when application does not have resources. -* Missing namespace for resources. -* Full Screen does not work in Safari. -* Recreating an application with the same name as that of a deleted application displays incorrect data for rollouts in the Timeline tab. -* In the Timeline tab, data for a new release with long sync duration is assigned to the previous release. - - -## August 2022 - -### Features & enhancements - -#### GitHub Container Registry -In this release, we added support for GitHub Container Registry (GHCR), a popular container registry tool. The settings for GitHub Container registry integration are identical to that of the other container registry integrations: the integration name, the runtimes to share the integration with, and the domain, username, and token. -You also have the Test Connection option to test credentials before committing the changes. -Once defined, you can reference the integration by name in the CI platforms. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-github-cr.png" - url="/images/whats-new/rel-notes-aug22-github-cr.png" - alt="GitHub Container registry integration" - caption="GitHub Container registry integration" - max-width="70%" -%} - -See [GitHub Container registry]({{site.baseurl}}/docs/integrations/ci-integrations/container-registries/github-cr/). - -#### Labels and annotations for managed clusters -The Codefresh CLI supports labels and annotations for managed clusters. -When you add a managed cluster in Codefresh, you can optionally add labels and annotations with the `--labels` and the `--annotations` flags. Codefresh supports the standard key-value formats for both, with multiple items separated by `,`. K8s rules for labels and annotations are valid here as well. - -See [Adding a managed cluster with Codefresh CLI]({{site.baseurl}}/docs/runtime/managed-cluster/#add-a-managed-cluster-with-codefresh-cli), and [Adding a managed cluster with Kustomize]({{site.baseurl}}/docs/runtime/managed-cluster/#add-a-managed-cluster-with-kustomize). - -#### Event information for application resources -View events for application resources directly in Codefresh. -While the Applications dashboard flags errors in all applications at the global level, the Events tab isolates successful and failed events per resource within an application, useful for resources such as pods. - -Instead of having to navigate to Argo CD to view events for an application resource, clicking the resource in the Current State view in Codefresh displays the Events tab for that resource. Events are displayed in descending order, with the most recent event displayed first. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-events-tab.png" - url="/images/whats-new/rel-notes-aug22-events-tab.png" - alt="Events tab for application in Current State" - caption="Events tab for application in Current State" - max-width="60%" -%} - -#### Quick View for applications -Similar to the detailed views for application resources, Codefresh offers a detailed view also for the application itself. 
-The Quick View for an application, collates definition, deployment, and event information, in the same location. The information is grouped into tabs for intuitive viewing: Summary, Metadata, Parameters, Sync Options, Manifest, and Events (as in the picture below). - -Easily access the Quick View either by selecting Quick View from the application’s context menu in the Applications dashboard, or by clicking the application resource in the Current State view. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-quickview-events.png" - url="/images/whats-new/rel-notes-aug22-quickview-events.png" - alt="Application Quick View: Events tab" - caption="Application Quick View: Events tab" - max-width="40%" -%} - -See [Application Quick View]({{site.baseurl}}/docs/deployment/applications-dashboard/#application-quick-view). - - - -#### Usability enhancements for applications -**Context menu for applications** -Every application in the Applications dashboard includes a new context menu with access to frequently-used and useful options such as Quick View, synchronize, and edit applications. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-new-app-nav.png" - url="/images/whats-new/rel-notes-aug22-new-app-nav.png" - alt="Tab order on application drilldown" - caption="Tab order on application drilldown" - max-width="70%" -%} - - -**Validations before commit with intuitive error message** -Codefresh validates Source, Destination, and Advanced Settings such as the Argo CD Project, when you create or update applications, _before_ committing the changes. -For easy identification, the section with the error is also highlighted in the Form, not only in the YAML manifest. For example, if the Revision or Path is missing in the General settings, the section is highlighted in red and the message displayed includes details on the possible reasons for the error. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-app-validation-errors.png" - url="/images/whats-new/rel-notes-aug22-app-validation-errors.png" - alt="Validation errors in Form mode for application" - caption="Validation errors in Form mode for application" -max-width="60%" -%} - -#### Miscellaneous changes - -{: .table .table-bordered .table-hover} -| Item | Description | -| ---------- | -------- | -| `CF_HOST` | Deprecated from v 0.0.460 and higher in CI integrations. Recommend using `CF_RUNTIME_NAME` instead. See [CI integrations argument reference]({{site.baseurl}}/docs/integrations/ci-integrations/#ci-integration-argument-reference). | -| `GHCR_GITHUB_TOKEN_AUTHENTICATION` | New value for `CF_CONTAINER_REGISTRY_INTEGRATION` argument. Can be selected for GitHub Container (GHCR) registries even when you don’t have a GHCR integration in Codefresh. See [GitHub Action-Codefresh integration arguments]({{site.baseurl}}/docs/integrations/ci-integrations/github-actions/#github-action-codefresh-integration-arguments).| - - - -### Bug fixes - -**Runtimes** -* Uninstalling runtime does not remove the integrations shared with the runtimes. -* Uninstalling a hosted or hybrid runtime does not remove it from the shared configuration repository. -* Unable to install Argo Rollouts on clusters with long cluster names. -* Empty Argo CD logs with "http internal error" in Codefresh. -* 500 status code on using default GKE/EKS context/cluster names. - -**Applications** -* Trying to commit an application that already exists results in a commit failure. 
- -**Images** -* Filters are not retained on navigating away from the Images dashboard. - -**Pipelines, workflows, and Workflow Templates** - -* Workflow Template filter does not work for Git Source. -* Missing validation for `WORKFLOW_NAME` variable. -* Incorrect sync history date for Workflow Templates. -* Error on detaching predefined filters in pipelines. - -**Integrations** -* Docker Hub integration list appears empty until refreshed even when there are integrations. -* Test Connection option disabled when integration name is not defined. - - - - -## July 2022 - -### Features & enhancements - -#### Hosted GitOps -Codefresh has launched Hosted GitOps, our newest offering, a hosted and managed version of Argo CD. - -From application analytics to application creation, rollout, and deployment, you get the best of both worlds: Argo CD with Codefresh's advanced functionalities and features for CD operations. -It also means easy setup and zero maintenance overhead. - -Read on for a summary of what you get with Hosted GitOps. - -**Hosted runtime** -Hosted GitOps supports hosted runtimes. The runtime is hosted on a Codefresh cluster and managed by Codefresh. Codefresh guides you through the three-step process of setting up your hosted environment. Read more in [Hosted runtime](#hosted-runtime). - -**Dashboards for visibility and traceability** -Here's a recap of Codefresh dashboards, including a brand new dashboard dedicated to DORA metrics: -* Home dashboard: For global analytics and system-wide deployment highlights, start with the Home dashboard. -* DORA metrics: A _new_ dashboard for DORA metrics and DevOps quantification. Read more in [DORA metrics](#dora-metrics). -* Applications dashboard: Easily track deployments and visualize rollouts across clusters and runtimes in the Applications dashboard. - -**Application lifecycle management** -Manage the entire application lifecycle directly in Codefresh, from creating and editing to deleting applications. -Define all application settings in a single location through the intuitive Form mode or directly in YAML, and commit all changes to Git. - -Synchronize applications manually when needed. Read more in [On-demand app synchronization](#on-demand-app-synchronization). - -**Integrations for image enrichment** -With Hosted GitOps, you can integrate your CI tools with Codefresh for image enrichment. Read more in [Integrations for image enrichment](#integrations-for-image-enrichment). - -{::nomarkdown} -
                -{:/} - -#### Hosted runtime -Hosted GitOps supports a GitHub-based SaaS runtime, hosted on a Codefresh cluster, and managed by Codefresh. -Setting up your hosted environment takes just a few clicks. All you need is a Codefresh account, a Git account, and a Kubernetes cluster to which to deploy your applications. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-hosted-initial-view.png" - url="/images/whats-new/rel-notes-jul22-hosted-initial-view.png" - alt="Hosted runtime setup" - caption="Hosted runtime setup" - max-width="80%" -%} - -Codefresh guides you through the simple three-step process of provisioning your hosted runtime. From that point, Codefresh handles administration and maintenance of the hosted runtime, including version and security updates. - -See [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -{::nomarkdown} -
-{:/} - -#### DORA metrics -DORA metrics have become integral to enterprises wanting to quantify DevOps performance, and Codefresh provides out-of-the-box support for them. - -The DORA dashboard in Codefresh goes beyond quantification, with features such as the Totals bar displaying key metrics, filters that let you pinpoint which applications or runtimes are contributing to problematic metrics, the option to show metrics only for starred applications, and the ability to set a different view granularity for each DORA metric. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-dora-metrics.png" - url="/images/whats-new/rel-notes-jul22-dora-metrics.png" - alt="DORA metrics" - caption="DORA metrics" - max-width="60%" -%} - -See [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/). - -{::nomarkdown} -
-{:/} - -#### Integrations for image enrichment -If you use our Hosted GitOps for CD and a different tool for CI, you can continue to enrich images while retaining your CI tools. Allow Codefresh to retrieve and report the image information in your deployments by connecting your CI tools to Codefresh. Connect CI tools, issue tracking tools, container registries, and more. - - -This release introduces our integration offering, starting with: -* GitHub Actions, Jenkins, and Codefresh Classic for CI -* Jira for issue tracking -* Docker Hub, Quay, and JFrog Artifactory for container registries - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-github-action-settings.png" - url="/images/whats-new/rel-notes-jul22-github-action-settings.png" - alt="Image enrichment with GitHub Actions integration" - caption="Image enrichment with GitHub Actions integration" - max-width="60%" -%} - - We are continually expanding the range of integrations, so stay tuned for announcements of new integrations. - -Codefresh encrypts the credentials for every integration you create, and stores them securely as Kubernetes Sealed Secrets, ensuring that the integration flow is completely GitOps-compatible. Pipelines reference the integration by name instead of by credentials. Codefresh retrieves enrichment information using the encrypted Kubernetes secrets. - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -{::nomarkdown} -
-{:/} - -#### Edit and delete applications - -Application management has become easier, as you can now edit and delete applications directly in Codefresh. - -Update the General and Advanced settings for an application. Go directly to the Configuration tab for the application by selecting Edit in the Applications dashboard. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-edit-app-option.png" - url="/images/whats-new/rel-notes-jul22-edit-app-option.png" - alt="Edit application option" - caption="Edit application option" -max-width="80%" -%} - -The Delete application option is available when you select an application. -In the Delete form, Codefresh warns you of the implications of deleting the selected application, based on the Prune resources setting. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-delete-app.png" - url="/images/whats-new/rel-notes-jul22-delete-app.png" - alt="Delete application" - caption="Delete application" -max-width="50%" -%} - -See [Update application configuration]({{site.baseurl}}/docs/deployment/create-application/#update-application-configuration) and [Delete an application]({{site.baseurl}}/docs/deployment/create-application/#delete-an-application). - -{::nomarkdown} -
-{:/} - -#### On-demand app synchronization -Manually synchronize applications whenever needed, directly from Codefresh. The synchronize option is a significant enhancement to the application lifecycle management options that we already support in Codefresh. - -The set of options for application synchronization is identical to that of Argo CD. For usability, they are grouped into two sets: Revision and Additional Options. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-sync-app.png" - url="/images/whats-new/rel-notes-jul22-sync-app.png" - alt="Synchronize application" - caption="Synchronize application" - max-width="60%" -%} - -{::nomarkdown} -
-{:/} - -#### Activate access for Codefresh support -User Settings includes an option to allow Codefresh support personnel access to your account for troubleshooting purposes. The option is disabled by default. When enabled, access is always coordinated and approved, and all actions are audited. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-account-access.png" - url="/images/whats-new/rel-notes-jul22-account-access.png" - alt="Enable account access" - caption="Enable account access" - max-width="80%" -%} - -See [Enable access for Codefresh support]({{site.baseurl}}/docs/administration/user-settings/#enable-access-for-codefresh-support). - -{::nomarkdown} -
                -{:/} - -#### View logs by container -When viewing logs for applications and workflows, you can now select the container for which to display them. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-log-container.png" - url="/images/whats-new/rel-notes-jul22-log-container.png" - alt="View logs by container" - caption="View logs by container" - max-width="50%" -%} - -### Bug fixes -**Runtimes** -* Unable to remove managed cluster on failure to add shared configuration repository. -* Maximum character limit not validated in cluster names. -* Failure when downloading logs for all runtime components. -* New cluster automatically assigned Unknown status. -* Sealed secret remains in cluster after uninstalling runtime. -* Unable to view rollouts on managed cluster. - - -**Applications** - -* Resources without namespaces (such as cluster role) do not open in Current State. -* Sync state icon frozen when syncing the application. -* Application created with the same name as deleted application displayed as new deployment. -* No error when creating an application with the same name as an existing application. -* Applications dashboard does not display an application with incorrect Source. -* Applications dashboard does not display the Jira issue for Docker image. -* Sync policy appears as Manual though set to automatic. -* Sync error message partially cut off. -* Application release does not always return binaryId, and repositoryName for transition images. -* Application name not displayed in sync errors. - -**Images** -* Registry filter used with other filters returns wrong results. -* Find query for image applications. - - -**Other** - -* Unable to view, access, and add SSO integrations. -* Failure on sealing key management check. -* Home dashboard: Most active pipelines and Delivery Pipelines displayed not aligned with the Time filter. -* Incorrect sorting for workflow and pipeline lists. - - - -## June 2022 - -### Features & enhancements - -#### Shared configuration for runtimes -Define configuration settings for a runtime once, and reuse the configuration settings for multiple runtimes in the same account. Reduce time needed to define and maintain configuration settings for every runtime separately. - -After defining the repository in which to store configuration settings, you can reference the repository, selectively from specific runtimes, or from all runtimes, and share the configuration. - -Older versions of hybrid runtimes without the shared repository must be upgraded to the latest version to leverage the shared configuration, specifically for integrations with CI platforms and tools. - -For details, see [Shared runtime configuration]({{site.baseurl}}/docs/runtime/shared-configuration/). - -#### Logs for runtime components -View and download logs for runtimes and runtime components. The logs record events from the time of application launch for all resources in the application. - -Download logs for offline viewing and analysis, or view logs per component online, and download as needed: - -* Download all logs: Available for every runtime for download as a single `.tar.gz` file, including the different log files for each runtime component. 
-{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-runtime-logs-all.png" - url="/images/whats-new/rel-notes-june22-runtime-logs-all.png" - alt="Download all logs for a runtime" - caption="Download all logs for a runtime" - max-width="60%" - %} - -* View/download logs per component: Available for every runtime component. View online logs, displaying up to 1000 lines of the most recent events. Locate information with free-text search, and navigate between search results using the next/previous buttons. Enhance readability by turning on line-wrap when needed. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-runtime-log-screen.png" - url="/images/whats-new/rel-notes-june22-runtime-log-screen.png" - alt="View logs online for runtime component" - caption="View logs online for runtime component" - max-width="60%" - %} - -For details, see [View/download runtime logs]({{site.baseurl}}/docs/runtime/download-runtime-logs/). - -#### OAuth2 authentication -OAuth (Open Authorization) 2.0 has become an industry standard for online authorization. Codefresh supports connections to your Git provider using OAuth2. Codefresh integrates with Git to sync repositories to your clusters, to implement Git-based actions when creating resources such as Delivery Pipelines, and to enrich Images with valuable information. - -Codefresh provides a default, predefined OAuth2 application for every runtime. As an account administrator in Codefresh, you can optionally create an OAuth2 Application in GitHub and set up authentication within Codefresh. Users in Codefresh can then authorize access to GitHub with OAuth2, instead of with a personal access token. - -For details, see [Set up OAuth2 authentication]({{site.baseurl}}/docs/administration/oauth-setup/). - - -#### Application resources in Tree view -The Tree view of the Current State complements the List view in the Applications dashboard. Similar to the List view, the Tree view also displays all the resources deployed for an application, with additional unique features. - -What is unique about the Tree view? -First, the Tree view simplifies visualizing and tracking resources for any deployment, especially complex deployments with hundreds of resources. Second, it is designed to impart key information for each resource at a glance. Every resource shows its health status (color-coded border), sync state (icon prefixed to name), and metadata on mouse-over. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-tree-view.png" - url="/images/whats-new/rel-notes-june22-tree-view.png" - alt="Application Current State: Tree view" - caption="Application Current State: Tree view" - max-width="60%" - %} - - -**Progressive discovery** - -By the very nature of its design, the Tree view allows progressive discovery. View all resources at once, or start with a parent resource, and expand it to view siblings and children to understand how they are connected. - -**Resource filters** - -The filters in the List view are also available in the Tree view. These global filters help narrow the scope of the resources displayed, by kind, health status, and sync state. The filters set in either the List or Tree views are retained when navigating between them. - -**Resource search and find** - -The Search option lets you locate resources by searching for any part of the resource name. Similar to the filters, search results are also retained when navigating between Tree and List views.
-For quick search, use the Find option to locate and navigate to required resources. - -**Resource inventory** - -At the bottom-left, the resource inventory summarizes your deployment in numbers per resource kind. Syncing and Out-of-Sync resources for each kind are bucketed separately, for visibility, and for quick access to filter resources by these states. - -**Resource manifest and logs** - -In addition to the metadata on mouse-over for a resource, clicking a resource shows its manifests and logs based on the resource type. View and compare the Desired and Live states for managed resources in Git repositories. -Another usability enhancement is the ability to share resource details by copying the URL and sending it to others in your organization for collaborative review. - -Logs are displayed if the resource has logs: - -* For online viewing, you have free-text search and line-wrap functionalities. -* For offline viewing and analysis, you can download the complete log into a text file. - -For details, see [Current State Tree view]({{site.baseurl}}/docs/deployment/applications-dashboard/view-modes-for-application-resources/#working-with-resources-in-tree-view). - -#### Application rollout visualization -In addition to installing Argo Rollouts in your cluster, visualize Argo Rollout history and progress directly in the Applications (deployment) dashboard. Visualize rollouts from multiple clusters and runtimes in a single centralized location through the Deployment tab. - -**Rollout progress** -Ongoing rollouts show the progress of the rollout in real time. Completed rollouts show the switch to the new version according to the deployment strategy. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-rollout-in-progress.png" - url="/images/whats-new/rel-notes-june22-rollout-in-progress.png" - caption="Application Rollout: Progress visualization" - max-width="60%" - %} - -**Rollout steps** - -As the rollout occurs, visualize step-by-step progress. Expanding Background Analysis displays metric queries and the manifest of the analysis template. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-rollout-analysis.png" - url="/images/whats-new/rel-notes-june22-rollout-analysis.png" - caption="Application Rollout: Steps visualization" - max-width="30%" - %} - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-rollout-query-metrics.png" - url="/images/whats-new/rel-notes-june22-rollout-query-metrics.png" - caption="Application Rollout: Query metrics" - max-width="30%" - %} - -For details, see [Rollout progress and step visualization]({{site.baseurl}}/docs/deployment/applications-dashboard/#monitor-rollouts-by-deployment). - -#### Nested workflows -Add nested workflow functionality to Codefresh pipelines. A nested workflow is a step within the parent workflow that either submits a new workflow, or creates a PR (Pull Request) that runs a different workflow based on the PR result. - -Nested workflows run independently of the parent workflow that submitted them. A nested submit workflow has traceability in both directions, from the parent to the child, and from the child to the parent. A workflow triggered by a nested PR identifies the PR that triggered it. - -Here’s an example of a parent workflow that submits two nested workflows, and the link back to the parent workflow from one of the child workflows.
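For orientation, the generic Argo Workflows pattern behind a submit-type nested workflow is a step that creates a new `Workflow` object, which then runs on its own. The following is a minimal, hypothetical sketch of that generic pattern only; all names are illustrative, and this is not the ready-made Codefresh Hub template. The screenshots that follow show how such a parent/child pair appears in Codefresh.

```yaml
# Hypothetical parent workflow that submits a child workflow as a nested step.
# All names are illustrative; the actual Hub templates differ.
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: parent-workflow-
spec:
  entrypoint: main
  templates:
    - name: main
      steps:
        - - name: submit-child
            template: submit-child
    - name: submit-child
      # A resource template creates the child Workflow in the cluster;
      # the child then runs independently of this parent.
      resource:
        action: create
        manifest: |
          apiVersion: argoproj.io/v1alpha1
          kind: Workflow
          metadata:
            generateName: child-workflow-
          spec:
            workflowTemplateRef:
              name: my-child-template   # illustrative WorkflowTemplate name
```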
- - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jun22-nested-parent-submit.png" - url="/images/whats-new/rel-notes-jun22-nested-parent-submit.png" - caption="Parent workflow with two nested submit workflows" - max-width="60%" - %} - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jun22-nested-child-submit.png" - url="/images/whats-new/rel-notes-jun22-nested-child-submit.png" - caption="Child submit workflow with link to parent workflow" - max-width="60%" - %} - -The Codefresh Hub for Argo has two ready-to-use Workflow Templates that: - -* Submit a workflow -* Create a PR to run the workflow that tracks the PR - -For details, see [Nested workflows]({{site.baseurl}}/docs/pipelines/nested-workflows/). - -### Bug fixes -**Runtimes** - -* Encrypted Git integration remains when uninstalling runtime through the CLI, and decryption through app-proxy fails. -* Rollback occurs during installation via CLI. -* Runtime ignores the `--demo-resources=false` flag at install confirmation. -* Installation via CLI stops when demo resources are not installed, even when the `--demo-resources` flag is set to false. -* No errors during installation via CLI when flags are incorrectly located. -* Runtime name with health or sync errors not highlighted in Codefresh UI. - -**Images** - -* Empty pages on changing filters in page two or higher. -* Broken link for an image not in logged-in user account. -* Images view not updated with current application with rollout resource. - -**Applications** - -* Lockout due to slow application load. -* Application dashboard remains frozen in Progressing state. -* Application dashboard > Timeline tab: - - * Default view not restored on removing date range defined in the Timeline tab. - * Order of deployments in the chart not identical to the list of rollouts. - * Committer for GitOps change missing in Commit information. - * Missing commit message for SHA link. - * Changes to an image tag not reflected. - * Rollout shows as in progress even after deployment status is healthy. - * New release in Argo CD not displayed in Codefresh UI when latest release was degraded without previous rollout data. - * Rollout YAML unavailable when application source is a Helm repo. -* Applications dashboard > Services tab: - - * Progressing rollout with manual traffic management returns empty Services list. -* Applications dashboard > Current State: - * Resource tree/list not restored on removing filters. - - -**Pipelines** - -* Selecting an existing Workflow Template creates a new Workflow Template. -* Incorrect line numbers for pipeline template in Form mode. - - -## May 2022 - -### Features & enhancements - -#### Runtime disaster recovery -Runtimes are integral to all CI/CD actions and operations in Codefresh. In this release, we added the capability to restore runtimes in case of cluster failures, either partial or complete. -All you need is the existing Git repo in which you installed the runtime, containing the runtime resources. The restore process reinstalls the runtime, leveraging the resources in the existing repo. You can choose to restore the runtime to the failed cluster or to a different cluster. -For details, see [Restore runtimes]({{site.baseurl}}/docs/runtime/runtime-recovery/). - -#### AWS ALB ingress controller -AWS Application Load Balancer (ALB) is now part of our list of supported ingress controllers.
-For details, see Ingress controller requirements in [Requirements]({{site.baseurl}}/docs/runtime/requirements/#ingress-controller), and [Post-installation configuration]({{site.baseurl}}/docs/runtime/installation/#post-installation-configuration). - - -#### Labels for runtime namespace -When installing runtimes, the `--namespace-label` flag lets you add labels to the runtime namespace. The labels identify and grant access to the installation network, required with service mesh ingress controllers such as Istio. -For both CLI-based and silent installations, add the flag followed by one or more labels in `key=value` format. Note that these labels must be identical to those defined in the 'namespace' resource spec. -For details, see [Runtime installation flags]({{site.baseurl}}/docs/runtime/installation/#runtime-installation-flags). - -#### Internal and external ingress hosts -Codefresh runtimes support defining two ingress hosts, an internal and an external ingress host, for private and public networks. Previously, runtimes supported a single ingress host for both the app-proxy and webhook ingress resources. Internal and external ingress separation allows you to expose the Codefresh app-proxy service only within your private network, while keeping the webhook ingress unchanged. -* New runtime installations: The `--internal-ingress-host` flag lets you define an ingress host for communication with the app-proxy. For details, see [Runtime installation flags]({{site.baseurl}}/docs/runtime/installation/#runtime-installation-flags). -* Existing runtimes: To add an internal ingress host, you need to commit changes to the installation repository by modifying the `app-proxy ingress` resource `.yaml`. -For details, see _Internal ingress host configuration (optional)_ in [Post-installation configuration]({{site.baseurl}}/docs/runtime/installation#post-installation-configuration). - -For further customizations, add annotations for internal and external ingress hosts through the `--internal-ingress-annotation` and `--external-ingress-annotation` flags. - -#### oktapreview domain support -You can set up Okta SSO to log into your Okta preview environment. - -#### Git Source enhancements -A common scenario when using Git repositories for CI/CD is to include or exclude specific files or directories in the target repository from the destination repo or cluster. When creating or editing Git Sources in Codefresh, you can now include or exclude folders and files in the target Git repo, using Glob patterns. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-may22-git-source-exclude-include.png" - url="/images/whats-new/rel-notes-may22-git-source-exclude-include.png" - alt="Include/exclude options in Git Source" - caption="Include/exclude options in Git Source" - max-width="50%" - %} - -You can also delete Git Sources if needed. Selecting additional actions for a Git Source displays the Git Source details with the Delete option. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-may22-git-source-delete.png" - url="/images/whats-new/rel-notes-may22-git-source-delete.png" - alt="Delete Git Source" - caption="Delete Git Source" - max-width="90%" - %} - -For details, see [Add and manage Git Sources]({{site.baseurl}}/docs/runtime/git-sources/). - -### Bug fixes -**Runtimes** - -* With Istio ingress, app proxy communication with Argo CD fails with an `Unexpected token u in JSON` error.
-* Adding a managed cluster always commits manifests to the main branch and not to the defined default branch. -* Add managed cluster command fails when ingress host includes `/` suffix. -* Application groups not supported in Current State for older runtime versions. -* Retrieving a list of Git Sources for a runtime via CLI, causes the CLI to crash. -* Uninstalling a runtime does not remove runtime-related secrets from the cluster. - -**Applications** - -* Applications deleted from the Argo UI not removed from the Applications dashboard in Codefresh. -* Back button in Applications > Timeline tab does not work. -* Hierarchy for AppSet application created in Argo CD not rendered correctly in Codefresh. -* Most Active Applications list in the Home dashboard is incorrectly sorted. -* Link to CI build on Service in Applications Dashboard is hard-coded to Workflows. -* Add Application wizard creates invalid manifest. -* Removing a resource from an application does not remove it from the application’s Current State list. -* Deleting an application deletes it from the cluster and the Git repo, but not from the database. -* Creating an application without path results in an error. -* On page reload, deployment chart in Application > Timeline tab does not reflect set filters. -* Resources with changed file names are not reported in Argo CD. -* Unknown state for application sets with targets on external clusters. - -**Others** -* Clicking the Settings icon shows a console error. -* Workflow Templates reported without Git manifests and desired state. -* Get list of workflows for a pipeline via CLI returns 400 bad request. -* GitHub user without a public email address causes autopilot to crash in app-proxy. -* Within a staging app, regular deployment transition is empty and shows only replicas count. - - -## March-April 2022 - -### Features & enhancements - -#### Kubernetes version runtime support -We now support the latest Kubernetes server versions, 1.22 and 1.23. - -#### Ingress controllers -We are continually working on supporting additional Ingress controllers, and this release adds support for: -* Ambassador -* NGINX Enterprise -* Istio -* Traefik - -All ingress controllers must be configured to report their status. -For details, see [Ingress controller requirements]({{site.baseurl}}/docs/runtime/requirements/#ingress-controller). - - -#### Argo CD managed cluster support -Argo CD can manage clusters without Argo CD installed on them. Now you have the same functionality in Codefresh, to add, view, and manage remote clusters. -Admins can add an external cluster to a Codefresh runtime, and register it automatically as a managed cluster. From that point on, you have complete visibility into health and sync status, and options to manage them, including installing Argo Rollouts. - -With managed clusters in Codefresh, you get: -* Streamlined management: All cluster- and cluster-component level operations are managed through the runtime, in a centralized location. You can install new components, uninstall existing components, and remove the cluster from the runtime's managed list. A single click installs Argo Rollouts on the managed cluster. 
- -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-argo-rollouts.png" - url="/images/whats-new/rel-notes-argo-rollouts.png" - alt="Install Argo Rollouts for managed cluster in topology view" - caption="Install Argo Rollouts for managed cluster in topology view" - max-width="70%" - %} - -* Seamless upgrades: Upgrades to runtimes or to runtime components in the local cluster automatically upgrade those in managed clusters as well. -* Integration with dashboards: Applications dashboards reflect deployment information for applications in all managed clusters. When Argo Rollouts are installed, application rollouts are also reported to the dashboard. - -For details, see [Managed clusters]({{site.baseurl}}/docs/runtime/managed-cluster). - -#### Topology views for runtimes - -Get a visual representation of the runtimes in your deployments, managed clusters, and cluster components with the Topology view for runtimes. -Quickly identify key information such as health and sync status, and version. -Add new clusters to or remove existing clusters from runtime management. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-runtime-topology-view.png" - url="/images/whats-new/rel-notes-runtime-topology-view.png" - alt="Runtime topology view" - caption="Runtime topology view" - max-width="70%" - %} - -For details, see [Topology view for runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/#topology-view). - -#### Analytics dashboard -In addition to Delivery Pipelines, the Analytics dashboard shows Runtimes, Managed Clusters, Deployments, and Applications, to give you the complete CI/CD picture with key facts and insights. - -**Usability enhancements** - * Global filters are now located at the top of the dashboard. - * Resource-specific filters are available for each resource. - * A convenient View button takes you to the dedicated resource view for additional analysis. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-analytics-dashboard.png" - url="/images/whats-new/rel-notes-analytics-dashboard.png" - alt="Analytics dashboard" - caption="Analytics dashboard" - max-width="70%" - %} - -#### Applications dashboard -The Applications dashboard displays the individual deployments across your enterprise. Here are the main enhancements: - -**Application inventory and status filters** - - The health status snapshot in the Applications dashboard also works as a quick filter. Selecting a status filters applications by that status. - Filter criteria that match child applications automatically expand the parent application to show the child applications. - - {% include - image.html - lightbox="true" - file="/images/whats-new/app-dashboard-status-filter.png" - url="/images/whats-new/app-dashboard-status-filter.png" - alt="Applications dashboard: Filter by status" - caption="Applications dashboard: Filter by status" - max-width="70%" - %} - -**Rollouts** - - Intuitive visualization with the option to open the Images view in a new browser window. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-apps-open-image.png" - url="/images/whats-new/rel-notes-apps-open-image.png" - alt="Applications dashboard: Link to Image view" - caption="Applications dashboard: Link to Image view" - max-width="70%" - %} - -**Git committers** - Hovering over an avatar shows all commits made by that committer.
- - -**Current state of cluster resources** - Hierarchical representation of the resources deployed by this application in the cluster. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-app-current-state.png" - url="/images/whats-new/rel-notes-app-current-state.png" - alt="Applications dashboard: Current State" - caption="Applications dashboard: Current State" - max-width="70%" - %} - -#### Workflow Templates -Codefresh provides full-fledged management for the Workflow Template resource, from optimizing existing Workflow Templates to creating new ones and testing them before commit. - - {% include - image.html - lightbox="true" - file="/images/whats-new/wrkflow-template-main.png" - url="/images/whats-new/wrkflow-template-main.png" - alt="Workflow Templates" - caption="Workflow Templates" - max-width="70%" - %} - -**Create, test, and optimize Workflow Templates** - Create Workflow Templates in three steps. Start by selecting one from the Codefresh Hub for Argo, or start with a blank template form. Customize the Workflow Template, and either run the template to test it or commit to submit it. - - {% include - image.html - lightbox="true" - file="/images/whats-new/wrkflow-template-add.png" - url="/images/whats-new/wrkflow-template-add.png" - alt="Add Workflow Template panel" - caption="Add Workflow Template panel" - max-width="50%" - %} - - For both new and existing Workflow Templates, the **Run** option enables you to test a new template, or changes to an existing template, without needing to first commit the changes. If the Workflow Template has previous iterations, you can view the arguments and values used in those iterations. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-wrkflow-temp-manifest-run.png" - url="/images/whats-new/rel-notes-wrkflow-temp-manifest-run.png" - alt="Run option for Workflow Templates" - caption="Run option for Workflow Templates" - max-width="70%" - %} - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-wrkflow-temp-run-args-view.png" - url="/images/whats-new/rel-notes-wrkflow-temp-run-args-view.png" - alt="Run Workflow Template: Arguments list" - caption="Run Workflow Template: Arguments list" - max-width="40%" - %} - - Workflows and Delivery Pipelines associated with the selected Workflow Template are displayed in the respective tabs, giving you all the information in the same location. - - -**Rename Workflow Template** - After creating a Workflow Template, you can rename it by selecting the template and clicking **Rename**. - The new name must be unique within the cluster. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-rename-workflow-template.png" - url="/images/whats-new/rel-notes-rename-workflow-template.png" - alt="Rename Workflow Template" - caption="Rename Workflow Template" - max-width="70%" - %} - - -#### Application creation wizard - -Create applications that are fully GitOps-compliant from the Codefresh UI. The application manifest is generated, committed to Git, and synced to your cluster. -When creating the application, you can use the UI forms or edit the manifest directly.
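To give a sense of what ends up in Git, here is a minimal, hypothetical sketch of the kind of Argo CD `Application` manifest such a wizard typically generates and commits; the name, repo URL, path, and sync policy are illustrative assumptions, not values produced by Codefresh. The wizard screenshot below shows the corresponding settings in the UI.

```yaml
# Minimal, illustrative Argo CD Application manifest.
# All names, the repo URL, and the sync policy are assumptions for this example.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: demo-app
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example/demo-app.git   # illustrative repository
    targetRevision: main
    path: k8s
  destination:
    server: https://kubernetes.default.svc
    namespace: demo-app
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
```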
- - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-app-create-settings.png" - url="/images/whats-new/rel-notes-app-create-settings.png" - alt="Application settings in application creation wizard" - caption="Application settings in application creation wizard" - max-width="70%" -%} - - -#### Delivery Pipeline flows -The Delivery Pipeline flow features several usability and functionality enhancements. - -**Seamless integration of Argo Event information with Argo Workflows** - - Once a workflow is submitted for a Delivery Pipeline, the Workflows tab visualizes the connections between the steps in the workflow. - With Argo Event information for the workflow also incorporated into the visualization, you have a unified view of Argo Events and Argo Workflows in the same location: the events that triggered the workflow, combined with the workflow itself. - - The Event Source manifest, the event payload, and the Sensor manifest are displayed as pull-out panels, allowing you to easily copy paths for attributes from event payloads, view logs, and download artifacts. - - This example shows the event payload from Argo Events for the workflow. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-event-payload.png" - url="/images/whats-new/rel-notes-event-payload.png" - alt="Panel with Event Payload in Workflows tab" - caption="Panel with Event Payload in Workflows tab" - max-width="70%" -%} - - This example shows the sensor manifest from Argo Events for the workflow. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-event-srce-manifest.png" - url="/images/whats-new/rel-notes-event-srce-manifest.png" - alt="Panel with Sensor manifest in Workflows tab" - caption="Panel with Sensor manifest in Workflows tab" - max-width="70%" -%} - -**Rename trigger resource** - - Similar to Workflow Templates, you can now change the trigger name of a Delivery Pipeline. The sensor name cannot be changed. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-rename-pipeline-trigger.png" - url="/images/whats-new/rel-notes-rename-pipeline-trigger.png" - alt="Rename trigger option for Delivery Pipeline" - caption="Rename trigger option for Delivery Pipelines" - max-width="70%" -%} - -**Git repo selection for commits** - - A dropdown list allows you to select one or more Git repos in the Trigger Conditions tab. Start typing, and use autocomplete to view and select from the available Git repos. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-git-repo-select.png" - url="/images/whats-new/rel-notes-git-repo-select.png" - alt="Git repo selection for Delivery Pipelines" - caption="Git repo selection for Delivery Pipelines" - max-width="70%" -%} - - -**Errors/warnings in manifests synced with line numbers in the manifest** - - Clicking the line number next to an error or a warning changes focus to the line in the manifest file with the error or warning. - - -#### Workflows dashboard enhancements - -**Link from workflows to their pipelines** - - Workflow names in the dashboard are clickable links. Clicking a workflow name takes you directly to the pipeline associated with that workflow. - -**New status for active workflows without events** - -Identify workflows that are active but do not have any execution data with the new status filter in the Workflows dashboard. Filtering by Status ‘Unknown’ shows workflows without events for the last hour.
- -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-workflow-unknown-status.png" - url="/images/whats-new/rel-notes-workflow-unknown-status.png" - alt="Unknown status filter for workflows" - caption="Unknown status filter for workflows" - max-width="50%" -%} - -#### Image reporting with Docker config.json -You can now also authenticate to a Docker registry using Docker `config.json` to report image information. Note that `config.json` is not currently supported for GCR, ECR, and ACR. -For more information on the required fields, see [Report image info](https://github.com/codefresh-io/argo-hub/blob/main/workflows/codefresh-csdp/versions/0.0.6/docs/report-image-info.md){:target="\_blank"}. - - -#### OpenShift 4.8 support -CSDP supports Red Hat OpenShift 4.8. For detailed information, read their [blog](https://cloud.redhat.com/blog/red-hat-openshift-4.8-is-now-generally-available#:~:text=OpenShift%204.8%20improves%20the%20bare,is%20now%20shipping%20with%20OpenShift){:target="\_blank"}. - -### Bug fixes - -**Applications dashboard** - -* Inaccurate results when filtering by Application type. -* Cluster shows the address of the Argo CD cluster instead of the target cluster. -* Broken Commit link in Application Preview. -* Filter by favorites does not show ApplicationSets. -* Releases not ordered correctly. -* Missing tags for Application/ApplicationSet. -* Loop created on changing date in the Applications dashboard. -* Rollouts in Deployment chart not aligned with the actual order of rollouts. -* Missing current release label. -* Missing commit message. -* JIRA annotations not displayed for Images in Docker.io. -* Avatars show up intermittently. -* Incorrect Committers in Applications dashboard. -* Performance issues. - -**Images** - -* Duplicate applications in Images repositories with different tags. -* Unmarked deployed images. - -**Pipelines** - -* Empty event-sources. -* Missing created/updated/deleted status for resources. -* Event mapping issues. -* Creating a new pipeline with an existing Template shows empty Template tab. - -**Upgrade** - -* Agent upgrade overrides configuration in previous release. - -**Uninstall** - -* Artifacts in database after uninstalling with `--force` flag. -* Uninstallation issues with newer K8s versions. - - diff --git a/_docs/pipelines/concurrency-limit.md b/_docs/workflows/concurrency-limit.md similarity index 98% rename from _docs/pipelines/concurrency-limit.md rename to _docs/workflows/concurrency-limit.md index 780a53a4d..405361dd7 100644 --- a/_docs/pipelines/concurrency-limit.md +++ b/_docs/workflows/concurrency-limit.md @@ -1,13 +1,13 @@ --- title: "Selectors for concurrency synchronization" description: "" -group: pipelines +group: workflows toc: true --- Argo Workflows has a synchronization mechanism to limit parallel execution of specific workflows or templates within workflows, as required. -The mechanism enforces this with either semaphore or mutex synchronization configurations. For detailed information, see [Synchronization](https://argoproj.github.io/argo-workflows/synchronization/). +The mechanism enforces this with either semaphore or mutex synchronization configurations. For detailed information, see [Synchronization](https://argoproj.github.io/argo-workflows/synchronization/){:target="\_blank"}. Codefresh supports an additional level of concurrency synchronization, with _selectors_, for both workflows and templates.
@@ -163,4 +163,6 @@ synchronization: - synchronization-wf-8lf9b semaphore: argo/ConfigMap/semaphore-config/workflow?repository=denis-codefresh/argo-workflows&branch=feature ``` +## Related articles +[Creating workflows]({{site.baseurl}}/docs/workflows/create-pipeline) diff --git a/_docs/pipelines/configure-artifact-repository.md b/_docs/workflows/configure-artifact-repository.md similarity index 98% rename from _docs/pipelines/configure-artifact-repository.md rename to _docs/workflows/configure-artifact-repository.md index 894ec98c4..3f4b60578 100644 --- a/_docs/pipelines/configure-artifact-repository.md +++ b/_docs/workflows/configure-artifact-repository.md @@ -1,7 +1,7 @@ --- title: "Configure artifact repository" description: "" -group: pipelines +group: workflows toc: true --- @@ -179,3 +179,6 @@ As the final step in configuring the artifact repository, for the `argo-server` 1. Wait for the configuration changes to take effect on the cluster. 1. Check the `argo-server` service account and verify that it is updated with the user-provided `annotation`. 1. Select the `argo-server-<#>` pod or pods and delete them. + +## Related articles +[Creating workflows]({{site.baseurl}}/docs/workflows/create-pipeline) \ No newline at end of file diff --git a/_docs/pipelines/create-pipeline.md b/_docs/workflows/create-pipeline.md similarity index 97% rename from _docs/pipelines/create-pipeline.md rename to _docs/workflows/create-pipeline.md index 0f11bf6ad..60cb80760 100644 --- a/_docs/pipelines/create-pipeline.md +++ b/_docs/workflows/create-pipeline.md @@ -1,7 +1,7 @@ --- -title: "Pipeline creation" +title: "Creating workflows" description: "" -group: pipelines +group: workflows toc: true --- @@ -33,7 +33,7 @@ An intuitive selection mechanism enables you to easily select and configure each ### Delivery Pipeline creation flow Here's a high-level overview of the Delivery Pipeline creation flow. -For step-by-step instructions, see [How to: create a Delivery Pipeline]({{site.baseurl}}/docs/pipelines/create-pipeline/#how-to-create-a-delivery-pipeline). +For step-by-step instructions, see [How to: create a Delivery Pipeline]({{site.baseurl}}/docs/workflows/create-pipeline/#how-to-create-a-delivery-pipeline). 1. Define pipeline name and select Workflow Template to execute 1. Define default values for pipeline workflow template arguments @@ -56,7 +56,7 @@ In the Delivery Pipeline wizard, we have our starter Workflow Template to use as -> To share artifacts between steps in workflows, and to view archived logs for completed workflows, you must [configure an artifact repository in Codefresh]({{site.baseurl}}/docs/pipelines/configure-artifact-repository). +> To share artifacts between steps in workflows, and to view archived logs for completed workflows, you must [configure an artifact repository in Codefresh]({{site.baseurl}}/docs/workflows/configure-artifact-repository). @@ -278,3 +278,7 @@ Follow the step-by-step instructions to guide you through Delivery Pipeline wiza Codefresh commits the pipeline to the Git repository, and then syncs it to the cluster. Wait a few seconds for the sync to complete, and verify that the pipeline is displayed in the [Delivery Pipelines](https://g.codefresh.io/2.0/pipelines){:target="\_blank"} page. 
+## Related articles +[Selectors for concurrency synchronization]({{site.baseurl}}/docs/workflows/concurrency-limit) +[Nested workflows]({{site.baseurl}}/docs/workflows/nested-workflows) +[Configure artifact repository]({{site.baseurl}}/docs/workflows/configure-artifact-repository) diff --git a/_docs/pipelines/nested-workflows.md b/_docs/workflows/nested-workflows.md similarity index 99% rename from _docs/pipelines/nested-workflows.md rename to _docs/workflows/nested-workflows.md index 7264b1bf4..7539c44da 100644 --- a/_docs/pipelines/nested-workflows.md +++ b/_docs/workflows/nested-workflows.md @@ -1,7 +1,7 @@ --- title: "Nested workflows" description: "" -group: pipelines +group: workflows toc: true --- diff --git a/_docs/pipelines/sharing-file-system.md b/_docs/workflows/sharing-file-system.md similarity index 99% rename from _docs/pipelines/sharing-file-system.md rename to _docs/workflows/sharing-file-system.md index 12722a432..c01e0125d 100644 --- a/_docs/pipelines/sharing-file-system.md +++ b/_docs/workflows/sharing-file-system.md @@ -1,7 +1,7 @@ --- title: "Sharing file systems" description: "" -group: pipelines +group: workflows toc: true --- diff --git a/_docs/pipelines/workflows.md b/_docs/workflows/workflows.md similarity index 100% rename from _docs/pipelines/workflows.md rename to _docs/workflows/workflows.md diff --git a/images/administration/access-control/kubernetes-abac.png b/images/administration/access-control/kubernetes-abac.png new file mode 100644 index 000000000..6dd650d56 Binary files /dev/null and b/images/administration/access-control/kubernetes-abac.png differ diff --git a/images/administration/access-control/kubernetes-policies.png b/images/administration/access-control/kubernetes-policies.png new file mode 100644 index 000000000..97249cfe7 Binary files /dev/null and b/images/administration/access-control/kubernetes-policies.png differ diff --git a/images/administration/access-control/pipeline-git-restrictions.png b/images/administration/access-control/pipeline-git-restrictions.png new file mode 100644 index 000000000..caa94620e Binary files /dev/null and b/images/administration/access-control/pipeline-git-restrictions.png differ diff --git a/images/administration/access-control/pipeline-restrictions.png b/images/administration/access-control/pipeline-restrictions.png new file mode 100644 index 000000000..75fbafbd8 Binary files /dev/null and b/images/administration/access-control/pipeline-restrictions.png differ diff --git a/images/administration/access-control/pipeline-tags.png b/images/administration/access-control/pipeline-tags.png new file mode 100644 index 000000000..22b921e63 Binary files /dev/null and b/images/administration/access-control/pipeline-tags.png differ diff --git a/images/administration/access-control/security-timeout.png b/images/administration/access-control/security-timeout.png new file mode 100644 index 000000000..41d477fea Binary files /dev/null and b/images/administration/access-control/security-timeout.png differ diff --git a/images/administration/access-control/tagging-kubernetes-clusters.png b/images/administration/access-control/tagging-kubernetes-clusters.png new file mode 100644 index 000000000..0d118f98b Binary files /dev/null and b/images/administration/access-control/tagging-kubernetes-clusters.png differ diff --git a/images/administration/access-control/teams.png b/images/administration/access-control/teams.png new file mode 100644 index 000000000..58f7c2c85 Binary files /dev/null and 
b/images/administration/access-control/teams.png differ diff --git a/images/administration/access-control/user-access-control.png b/images/administration/access-control/user-access-control.png new file mode 100644 index 000000000..954101752 Binary files /dev/null and b/images/administration/access-control/user-access-control.png differ diff --git a/images/administration/audit/api-call-details.png b/images/administration/audit/api-call-details.png new file mode 100644 index 000000000..5bae454cc Binary files /dev/null and b/images/administration/audit/api-call-details.png differ diff --git a/images/administration/audit/audit-filter.png b/images/administration/audit/audit-filter.png new file mode 100644 index 000000000..4a8f77627 Binary files /dev/null and b/images/administration/audit/audit-filter.png differ diff --git a/images/administration/audit/audit-logs.png b/images/administration/audit/audit-logs.png new file mode 100644 index 000000000..0bcda78c2 Binary files /dev/null and b/images/administration/audit/audit-logs.png differ diff --git a/images/administration/audit/audit-triggers.png b/images/administration/audit/audit-triggers.png new file mode 100644 index 000000000..20ca12d88 Binary files /dev/null and b/images/administration/audit/audit-triggers.png differ diff --git a/images/administration/audit/icon-UnorderedList-notification.png b/images/administration/audit/icon-UnorderedList-notification.png deleted file mode 100644 index 0ba6f1783..000000000 Binary files a/images/administration/audit/icon-UnorderedList-notification.png and /dev/null differ diff --git a/images/administration/create-account/bitbucket-authorize.png b/images/administration/create-account/bitbucket-authorize.png new file mode 100644 index 000000000..987adbff3 Binary files /dev/null and b/images/administration/create-account/bitbucket-authorize.png differ diff --git a/images/administration/create-account/codefresh-accountname.png b/images/administration/create-account/codefresh-accountname.png new file mode 100644 index 000000000..44cfd1036 Binary files /dev/null and b/images/administration/create-account/codefresh-accountname.png differ diff --git a/images/administration/create-account/codefresh-dashboard.png b/images/administration/create-account/codefresh-dashboard.png new file mode 100644 index 000000000..86c077b08 Binary files /dev/null and b/images/administration/create-account/codefresh-dashboard.png differ diff --git a/images/administration/create-account/codefresh-personalize.png b/images/administration/create-account/codefresh-personalize.png new file mode 100644 index 000000000..205fd55b3 Binary files /dev/null and b/images/administration/create-account/codefresh-personalize.png differ diff --git a/images/administration/create-account/codefresh-signup.png b/images/administration/create-account/codefresh-signup.png new file mode 100644 index 000000000..ab7a35661 Binary files /dev/null and b/images/administration/create-account/codefresh-signup.png differ diff --git a/images/administration/create-account/create-account-steps.png b/images/administration/create-account/create-account-steps.png new file mode 100644 index 000000000..9769257b5 Binary files /dev/null and b/images/administration/create-account/create-account-steps.png differ diff --git a/images/administration/create-account/git-firewall.png b/images/administration/create-account/git-firewall.png new file mode 100644 index 000000000..bf4842c4f Binary files /dev/null and b/images/administration/create-account/git-firewall.png differ diff --git 
a/images/administration/create-account/github-authorize.png b/images/administration/create-account/github-authorize.png new file mode 100644 index 000000000..49aa8809b Binary files /dev/null and b/images/administration/create-account/github-authorize.png differ diff --git a/images/administration/create-account/gitlab-authorize.png b/images/administration/create-account/gitlab-authorize.png new file mode 100644 index 000000000..d666b39f6 Binary files /dev/null and b/images/administration/create-account/gitlab-authorize.png differ diff --git a/images/administration/create-account/select-identity-provider.png b/images/administration/create-account/select-identity-provider.png new file mode 100644 index 000000000..d5321606e Binary files /dev/null and b/images/administration/create-account/select-identity-provider.png differ diff --git a/images/administration/create-account/stash.png b/images/administration/create-account/stash.png new file mode 100644 index 000000000..1415da356 Binary files /dev/null and b/images/administration/create-account/stash.png differ diff --git a/images/administration/manage-pats/allow-support-access.png b/images/administration/manage-pats/allow-support-access.png new file mode 100644 index 000000000..2f58419c2 Binary files /dev/null and b/images/administration/manage-pats/allow-support-access.png differ diff --git a/images/administration/manage-pats/bitbucket-pat-scopes.png b/images/administration/manage-pats/bitbucket-pat-scopes.png new file mode 100644 index 000000000..3a81e17a0 Binary files /dev/null and b/images/administration/manage-pats/bitbucket-pat-scopes.png differ diff --git a/images/administration/manage-pats/github-pat-scopes.png b/images/administration/manage-pats/github-pat-scopes.png new file mode 100644 index 000000000..bd3443fb7 Binary files /dev/null and b/images/administration/manage-pats/github-pat-scopes.png differ diff --git a/images/administration/manage-pats/gitlab-pat-scopes.png b/images/administration/manage-pats/gitlab-pat-scopes.png new file mode 100644 index 000000000..08911e5d3 Binary files /dev/null and b/images/administration/manage-pats/gitlab-pat-scopes.png differ diff --git a/images/administration/manage-pats/oauth-user-authentication.png b/images/administration/manage-pats/oauth-user-authentication.png new file mode 100644 index 000000000..d57bbb417 Binary files /dev/null and b/images/administration/manage-pats/oauth-user-authentication.png differ diff --git a/images/administration/manage-pats/user-settings-pat.png b/images/administration/manage-pats/user-settings-pat.png new file mode 100644 index 000000000..fd97a1044 Binary files /dev/null and b/images/administration/manage-pats/user-settings-pat.png differ diff --git a/images/administration/sso/collaborators.png b/images/administration/sso/collaborators.png index e589219d0..a0e022c17 100644 Binary files a/images/administration/sso/collaborators.png and b/images/administration/sso/collaborators.png differ diff --git a/images/administration/user-settings/allow-support-access.png b/images/administration/user-settings/allow-support-access.png new file mode 100644 index 000000000..2f58419c2 Binary files /dev/null and b/images/administration/user-settings/allow-support-access.png differ diff --git a/images/administration/user-settings/notifications.png b/images/administration/user-settings/notifications.png new file mode 100644 index 000000000..2e54acc6c Binary files /dev/null and b/images/administration/user-settings/notifications.png differ diff --git 
a/images/administration/users/invite-users.png b/images/administration/users/invite-users.png new file mode 100644 index 000000000..ee244385a Binary files /dev/null and b/images/administration/users/invite-users.png differ diff --git a/images/deployments/helm/add-helm-repository.png b/images/deployments/helm/add-helm-repository.png new file mode 100644 index 000000000..f9227b255 Binary files /dev/null and b/images/deployments/helm/add-helm-repository.png differ diff --git a/images/deployments/helm/connect-helm-repo.png b/images/deployments/helm/connect-helm-repo.png new file mode 100644 index 000000000..576319863 Binary files /dev/null and b/images/deployments/helm/connect-helm-repo.png differ diff --git a/images/deployments/helm/diff.png b/images/deployments/helm/diff.png new file mode 100644 index 000000000..56888435f Binary files /dev/null and b/images/deployments/helm/diff.png differ diff --git a/images/deployments/helm/full-helm-pipeline.png b/images/deployments/helm/full-helm-pipeline.png new file mode 100644 index 000000000..429cbe4c8 Binary files /dev/null and b/images/deployments/helm/full-helm-pipeline.png differ diff --git a/images/deployments/helm/helm-badge.png b/images/deployments/helm/helm-badge.png new file mode 100644 index 000000000..ff284dcd8 Binary files /dev/null and b/images/deployments/helm/helm-badge.png differ diff --git a/images/deployments/helm/helm-commit-message.png b/images/deployments/helm/helm-commit-message.png new file mode 100644 index 000000000..b9433254c Binary files /dev/null and b/images/deployments/helm/helm-commit-message.png differ diff --git a/images/deployments/helm/helm-release-dashboard.png b/images/deployments/helm/helm-release-dashboard.png new file mode 100644 index 000000000..c7078f88b Binary files /dev/null and b/images/deployments/helm/helm-release-dashboard.png differ diff --git a/images/deployments/helm/history.png b/images/deployments/helm/history.png new file mode 100644 index 000000000..c00e3e5a8 Binary files /dev/null and b/images/deployments/helm/history.png differ diff --git a/images/deployments/helm/import-helm-configuration.png b/images/deployments/helm/import-helm-configuration.png new file mode 100644 index 000000000..17fbbf3cb Binary files /dev/null and b/images/deployments/helm/import-helm-configuration.png differ diff --git a/images/deployments/helm/k8s-name.png b/images/deployments/helm/k8s-name.png new file mode 100644 index 000000000..24197bb23 Binary files /dev/null and b/images/deployments/helm/k8s-name.png differ diff --git a/images/deployments/helm/managed-helm-repo.png b/images/deployments/helm/managed-helm-repo.png new file mode 100644 index 000000000..0a9d0c5b7 Binary files /dev/null and b/images/deployments/helm/managed-helm-repo.png differ diff --git a/images/deployments/helm/manifests.png b/images/deployments/helm/manifests.png new file mode 100644 index 000000000..67e4ad306 Binary files /dev/null and b/images/deployments/helm/manifests.png differ diff --git a/images/deployments/helm/override-helm-actions.png b/images/deployments/helm/override-helm-actions.png new file mode 100644 index 000000000..db9a39df9 Binary files /dev/null and b/images/deployments/helm/override-helm-actions.png differ diff --git a/images/deployments/helm/promotion/board-management.png b/images/deployments/helm/promotion/board-management.png new file mode 100644 index 000000000..8b99e0a87 Binary files /dev/null and b/images/deployments/helm/promotion/board-management.png differ diff --git 
a/images/deployments/helm/promotion/board-selection.png b/images/deployments/helm/promotion/board-selection.png new file mode 100644 index 000000000..01922b53b Binary files /dev/null and b/images/deployments/helm/promotion/board-selection.png differ diff --git a/images/deployments/helm/promotion/board.png b/images/deployments/helm/promotion/board.png new file mode 100644 index 000000000..bbd35458a Binary files /dev/null and b/images/deployments/helm/promotion/board.png differ diff --git a/images/deployments/helm/promotion/edit-helm-environment.png b/images/deployments/helm/promotion/edit-helm-environment.png new file mode 100644 index 000000000..7e06aa853 Binary files /dev/null and b/images/deployments/helm/promotion/edit-helm-environment.png differ diff --git a/images/deployments/helm/promotion/edit-value.png b/images/deployments/helm/promotion/edit-value.png new file mode 100644 index 000000000..b28abfb2f Binary files /dev/null and b/images/deployments/helm/promotion/edit-value.png differ diff --git a/images/deployments/helm/promotion/expand.png b/images/deployments/helm/promotion/expand.png new file mode 100644 index 000000000..9e4d063f4 Binary files /dev/null and b/images/deployments/helm/promotion/expand.png differ diff --git a/images/deployments/helm/promotion/filter.png b/images/deployments/helm/promotion/filter.png new file mode 100644 index 000000000..a429fd407 Binary files /dev/null and b/images/deployments/helm/promotion/filter.png differ diff --git a/images/deployments/helm/promotion/helm-environments.png b/images/deployments/helm/promotion/helm-environments.png new file mode 100644 index 000000000..4b5849797 Binary files /dev/null and b/images/deployments/helm/promotion/helm-environments.png differ diff --git a/images/deployments/helm/promotion/promote-settings.png b/images/deployments/helm/promotion/promote-settings.png new file mode 100644 index 000000000..4d965ac59 Binary files /dev/null and b/images/deployments/helm/promotion/promote-settings.png differ diff --git a/images/deployments/helm/promotion/promote.png b/images/deployments/helm/promotion/promote.png new file mode 100644 index 000000000..94bd01a85 Binary files /dev/null and b/images/deployments/helm/promotion/promote.png differ diff --git a/images/deployments/helm/promotion/shift-left.png b/images/deployments/helm/promotion/shift-left.png new file mode 100644 index 000000000..cce3c9378 Binary files /dev/null and b/images/deployments/helm/promotion/shift-left.png differ diff --git a/images/deployments/helm/promotion/shift-right.png b/images/deployments/helm/promotion/shift-right.png new file mode 100644 index 000000000..54d1a2ff0 Binary files /dev/null and b/images/deployments/helm/promotion/shift-right.png differ diff --git a/images/deployments/helm/promotion/value-options.png b/images/deployments/helm/promotion/value-options.png new file mode 100644 index 000000000..5ce6820f5 Binary files /dev/null and b/images/deployments/helm/promotion/value-options.png differ diff --git a/images/deployments/helm/quick-helm-integration.png b/images/deployments/helm/quick-helm-integration.png new file mode 100644 index 000000000..d6e7ced4b Binary files /dev/null and b/images/deployments/helm/quick-helm-integration.png differ diff --git a/images/deployments/helm/rollback.png b/images/deployments/helm/rollback.png new file mode 100644 index 000000000..5ff38aae6 Binary files /dev/null and b/images/deployments/helm/rollback.png differ diff --git a/images/deployments/helm/services.png b/images/deployments/helm/services.png new file 
mode 100644 index 000000000..e9e00b23e Binary files /dev/null and b/images/deployments/helm/services.png differ diff --git a/images/deployments/kubernetes/define-k8s-deployment-resource.png b/images/deployments/kubernetes/define-k8s-deployment-resource.png new file mode 100644 index 000000000..381ea2e78 Binary files /dev/null and b/images/deployments/kubernetes/define-k8s-deployment-resource.png differ diff --git a/images/deployments/kubernetes/define-k8s-service-resource.png b/images/deployments/kubernetes/define-k8s-service-resource.png new file mode 100644 index 000000000..2488975be Binary files /dev/null and b/images/deployments/kubernetes/define-k8s-service-resource.png differ diff --git a/images/deployments/kubernetes/deploying-private-cf-registry.png b/images/deployments/kubernetes/deploying-private-cf-registry.png new file mode 100644 index 000000000..92b9c979c Binary files /dev/null and b/images/deployments/kubernetes/deploying-private-cf-registry.png differ diff --git a/images/deployments/kubernetes/describe-k8s-deployment.png b/images/deployments/kubernetes/describe-k8s-deployment.png new file mode 100644 index 000000000..68be6815a Binary files /dev/null and b/images/deployments/kubernetes/describe-k8s-deployment.png differ diff --git a/images/deployments/kubernetes/environment-variables-deployment.png b/images/deployments/kubernetes/environment-variables-deployment.png new file mode 100644 index 000000000..6b90e010a Binary files /dev/null and b/images/deployments/kubernetes/environment-variables-deployment.png differ diff --git a/images/deployments/kubernetes/grid-view.png b/images/deployments/kubernetes/grid-view.png new file mode 100644 index 000000000..d05e9e554 Binary files /dev/null and b/images/deployments/kubernetes/grid-view.png differ diff --git a/images/deployments/kubernetes/kube-context.png b/images/deployments/kubernetes/kube-context.png new file mode 100644 index 000000000..c94744a62 Binary files /dev/null and b/images/deployments/kubernetes/kube-context.png differ diff --git a/images/deployments/kubernetes/kubernetes-dashboard.png b/images/deployments/kubernetes/kubernetes-dashboard.png new file mode 100644 index 000000000..1a78c079f Binary files /dev/null and b/images/deployments/kubernetes/kubernetes-dashboard.png differ diff --git a/images/deployments/kubernetes/parallel-kubectl.png b/images/deployments/kubernetes/parallel-kubectl.png new file mode 100644 index 000000000..9945bfe6b Binary files /dev/null and b/images/deployments/kubernetes/parallel-kubectl.png differ diff --git a/images/deployments/kubernetes/quick-ui-deploy.png b/images/deployments/kubernetes/quick-ui-deploy.png new file mode 100644 index 000000000..b2bc242d9 Binary files /dev/null and b/images/deployments/kubernetes/quick-ui-deploy.png differ diff --git a/images/examples/amazon-ecs/ecs-pipeline-deployment.png b/images/examples/amazon-ecs/ecs-pipeline-deployment.png new file mode 100644 index 000000000..cc4d3347d Binary files /dev/null and b/images/examples/amazon-ecs/ecs-pipeline-deployment.png differ diff --git a/images/examples/amazon-ecs/ecs-variables.png b/images/examples/amazon-ecs/ecs-variables.png new file mode 100644 index 000000000..f9bc5fa16 Binary files /dev/null and b/images/examples/amazon-ecs/ecs-variables.png differ diff --git a/images/examples/checkout/add-new-microservice.png b/images/examples/checkout/add-new-microservice.png new file mode 100644 index 000000000..58b965bc7 Binary files /dev/null and b/images/examples/checkout/add-new-microservice.png differ diff --git 
a/images/examples/checkout/add-new-microservice.svg b/images/examples/checkout/add-new-microservice.svg new file mode 100644 index 000000000..fd393f6bd --- /dev/null +++ b/images/examples/checkout/add-new-microservice.svg @@ -0,0 +1,1348 @@ [SVG markup lost in extraction; diagram labels: Pipeline, step, Trigger 1-4, Microservice N, Trigger N]
diff --git a/images/examples/checkout/simulate-trigger.png b/images/examples/checkout/simulate-trigger.png new file mode 100644 index 000000000..2c4da4ff4 Binary files /dev/null and b/images/examples/checkout/simulate-trigger.png differ diff --git a/images/examples/composition/launch-composition-example.png b/images/examples/composition/launch-composition-example.png new file mode 100644 index 000000000..ff5068a1e Binary files /dev/null and b/images/examples/composition/launch-composition-example.png differ diff --git a/images/examples/deployments/heroku-deployer-pipeline.png b/images/examples/deployments/heroku-deployer-pipeline.png new file mode 100644 index 000000000..3189634c4 Binary files /dev/null and b/images/examples/deployments/heroku-deployer-pipeline.png differ diff --git a/images/examples/deployments/heroku-deployer-variables.png b/images/examples/deployments/heroku-deployer-variables.png new file mode 100644 index 000000000..7fff37fbd Binary files /dev/null and b/images/examples/deployments/heroku-deployer-variables.png differ diff --git a/images/examples/deployments/heroku-deployer-variables2.png b/images/examples/deployments/heroku-deployer-variables2.png new file mode 100644 index 000000000..a8a944c5e Binary files /dev/null and b/images/examples/deployments/heroku-deployer-variables2.png differ diff --git a/images/examples/deployments/heroku-vanilla-push-pipeline.png b/images/examples/deployments/heroku-vanilla-push-pipeline.png new file mode 100644 index 000000000..d3ef1209b Binary files /dev/null and b/images/examples/deployments/heroku-vanilla-push-pipeline.png differ diff --git a/images/examples/deployments/k8s-deployment-CD-pipeline.png b/images/examples/deployments/k8s-deployment-CD-pipeline.png new file mode 100644 index 000000000..551ff8404 Binary files /dev/null and b/images/examples/deployments/k8s-deployment-CD-pipeline.png differ diff --git a/images/examples/deployments/k8s-deployment-ci-pipeline.png b/images/examples/deployments/k8s-deployment-ci-pipeline.png new file mode 100644 index 000000000..0e51832c6 Binary files /dev/null and b/images/examples/deployments/k8s-deployment-ci-pipeline.png differ diff --git a/images/examples/deployments/k8s-kustomize-dashboard.png b/images/examples/deployments/k8s-kustomize-dashboard.png new file mode 100644 index 000000000..2117ca0ec Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-dashboard.png differ diff --git a/images/examples/deployments/k8s-kustomize-pipeline.png b/images/examples/deployments/k8s-kustomize-pipeline.png new file mode 100644 index 000000000..59ab7270f Binary files /dev/null and
b/images/examples/deployments/k8s-kustomize-pipeline.png differ diff --git a/images/examples/deployments/k8s-kustomize-prod-endpoint.png b/images/examples/deployments/k8s-kustomize-prod-endpoint.png new file mode 100644 index 000000000..1dc4639b1 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-prod-endpoint.png differ diff --git a/images/examples/deployments/k8s-kustomize-prod-pipeline.png b/images/examples/deployments/k8s-kustomize-prod-pipeline.png new file mode 100644 index 000000000..91ee13d08 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-prod-pipeline.png differ diff --git a/images/examples/deployments/k8s-kustomize-staging-endpoint.png b/images/examples/deployments/k8s-kustomize-staging-endpoint.png new file mode 100644 index 000000000..fb9d016b1 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-staging-endpoint.png differ diff --git a/images/examples/deployments/k8s-kustomize-staging-pipeline.png b/images/examples/deployments/k8s-kustomize-staging-pipeline.png new file mode 100644 index 000000000..4984c6242 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-staging-pipeline.png differ diff --git a/images/examples/deployments/scp-hello-world.png b/images/examples/deployments/scp-hello-world.png new file mode 100644 index 000000000..d375443d3 Binary files /dev/null and b/images/examples/deployments/scp-hello-world.png differ diff --git a/images/examples/deployments/scp-pipeline.png b/images/examples/deployments/scp-pipeline.png new file mode 100644 index 000000000..de281fb74 Binary files /dev/null and b/images/examples/deployments/scp-pipeline.png differ diff --git a/images/examples/deployments/scp-variables.png b/images/examples/deployments/scp-variables.png new file mode 100644 index 000000000..5d8841804 Binary files /dev/null and b/images/examples/deployments/scp-variables.png differ diff --git a/images/examples/docker-build/auto-push-to-cfcr.png b/images/examples/docker-build/auto-push-to-cfcr.png new file mode 100644 index 000000000..6f0b10139 Binary files /dev/null and b/images/examples/docker-build/auto-push-to-cfcr.png differ diff --git a/images/examples/docker-build/build-and-push-pipeline.png b/images/examples/docker-build/build-and-push-pipeline.png new file mode 100644 index 000000000..9e4a943d9 Binary files /dev/null and b/images/examples/docker-build/build-and-push-pipeline.png differ diff --git a/images/examples/docker-build/build-dockerfile-root.png b/images/examples/docker-build/build-dockerfile-root.png new file mode 100644 index 000000000..d08ca037d Binary files /dev/null and b/images/examples/docker-build/build-dockerfile-root.png differ diff --git a/images/examples/docker-build/build-from-other-git-repo.png b/images/examples/docker-build/build-from-other-git-repo.png new file mode 100644 index 000000000..5c61a1e39 Binary files /dev/null and b/images/examples/docker-build/build-from-other-git-repo.png differ diff --git a/images/examples/docker-build/build-spefify-dockerfile.png b/images/examples/docker-build/build-spefify-dockerfile.png new file mode 100644 index 000000000..23e71e298 Binary files /dev/null and b/images/examples/docker-build/build-spefify-dockerfile.png differ diff --git a/images/examples/docker-build/cfcr-layers.png b/images/examples/docker-build/cfcr-layers.png new file mode 100644 index 000000000..ca67be1f9 Binary files /dev/null and b/images/examples/docker-build/cfcr-layers.png differ diff --git a/images/examples/docker-build/docker-build-arguments.png 
b/images/examples/docker-build/docker-build-arguments.png new file mode 100644 index 000000000..5907584a8 Binary files /dev/null and b/images/examples/docker-build/docker-build-arguments.png differ diff --git a/images/examples/docker-build/two-docker-images.png b/images/examples/docker-build/two-docker-images.png new file mode 100644 index 000000000..c7974d0d7 Binary files /dev/null and b/images/examples/docker-build/two-docker-images.png differ diff --git a/images/examples/docker-https/codefresh_nginx_container.png b/images/examples/docker-https/codefresh_nginx_container.png new file mode 100644 index 000000000..f2aea39d3 Binary files /dev/null and b/images/examples/docker-https/codefresh_nginx_container.png differ diff --git a/images/examples/docker-https/codefresh_webapp_container.png b/images/examples/docker-https/codefresh_webapp_container.png new file mode 100644 index 000000000..b56e30aed Binary files /dev/null and b/images/examples/docker-https/codefresh_webapp_container.png differ diff --git a/images/examples/docker-swarm/docker-swarm-pipeline.png b/images/examples/docker-swarm/docker-swarm-pipeline.png new file mode 100644 index 000000000..2fe7ed3c9 Binary files /dev/null and b/images/examples/docker-swarm/docker-swarm-pipeline.png differ diff --git a/images/examples/elastic-beanstalk/60d70d4-codefresh_eb_env_vars.png b/images/examples/elastic-beanstalk/60d70d4-codefresh_eb_env_vars.png new file mode 100644 index 000000000..2d481ef8f Binary files /dev/null and b/images/examples/elastic-beanstalk/60d70d4-codefresh_eb_env_vars.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png b/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png new file mode 100644 index 000000000..631cc70b3 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png b/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png new file mode 100644 index 000000000..2d481ef8f Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png b/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png new file mode 100644 index 000000000..f1f462b52 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_environment.png b/images/examples/elastic-beanstalk/codefresh_eb_environment.png new file mode 100644 index 000000000..3b7f6fce1 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_environment.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_health.png b/images/examples/elastic-beanstalk/codefresh_eb_health.png new file mode 100644 index 000000000..90e083b50 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_health.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_version_label.png b/images/examples/elastic-beanstalk/codefresh_eb_version_label.png new file mode 100644 index 000000000..62e33942b Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_version_label.png differ diff --git a/images/examples/git/sha-id-codefresh.png b/images/examples/git/sha-id-codefresh.png new file mode 100644 index 000000000..1b8b758d3 Binary files /dev/null and b/images/examples/git/sha-id-codefresh.png differ diff --git 
a/images/examples/git/sha-id-docker-hub.png b/images/examples/git/sha-id-docker-hub.png new file mode 100644 index 000000000..15bb44992 Binary files /dev/null and b/images/examples/git/sha-id-docker-hub.png differ diff --git a/images/examples/gs/gs-download-pipeline.png b/images/examples/gs/gs-download-pipeline.png new file mode 100644 index 000000000..cd704ccff Binary files /dev/null and b/images/examples/gs/gs-download-pipeline.png differ diff --git a/images/examples/gs/gs-pipeline-vars.png b/images/examples/gs/gs-pipeline-vars.png new file mode 100644 index 000000000..c356bc58a Binary files /dev/null and b/images/examples/gs/gs-pipeline-vars.png differ diff --git a/images/examples/gs/gs-upload-pipeline.png b/images/examples/gs/gs-upload-pipeline.png new file mode 100644 index 000000000..e6154ca42 Binary files /dev/null and b/images/examples/gs/gs-upload-pipeline.png differ diff --git a/images/examples/helm/helm-chart.png b/images/examples/helm/helm-chart.png new file mode 100644 index 000000000..54bfff9ca Binary files /dev/null and b/images/examples/helm/helm-chart.png differ diff --git a/images/examples/helm/helm-deploy-pipeline.png b/images/examples/helm/helm-deploy-pipeline.png new file mode 100644 index 000000000..3cf18fa3b Binary files /dev/null and b/images/examples/helm/helm-deploy-pipeline.png differ diff --git a/images/examples/helm/helm-push-and-deploy-pipeline.png b/images/examples/helm/helm-push-and-deploy-pipeline.png new file mode 100644 index 000000000..284b6aa8c Binary files /dev/null and b/images/examples/helm/helm-push-and-deploy-pipeline.png differ diff --git a/images/examples/helm/helm-release.png b/images/examples/helm/helm-release.png new file mode 100644 index 000000000..aa25f4733 Binary files /dev/null and b/images/examples/helm/helm-release.png differ diff --git a/images/examples/helm/import-helm-configuration.png b/images/examples/helm/import-helm-configuration.png new file mode 100644 index 000000000..538e04abd Binary files /dev/null and b/images/examples/helm/import-helm-configuration.png differ diff --git a/images/examples/integration-tests/integration-tests.png b/images/examples/integration-tests/integration-tests.png new file mode 100644 index 000000000..d14833241 Binary files /dev/null and b/images/examples/integration-tests/integration-tests.png differ diff --git a/images/examples/integration-tests/mongodb-integration-tests.png b/images/examples/integration-tests/mongodb-integration-tests.png new file mode 100644 index 000000000..78604436b Binary files /dev/null and b/images/examples/integration-tests/mongodb-integration-tests.png differ diff --git a/images/examples/integration-tests/mysql-integration-tests.png b/images/examples/integration-tests/mysql-integration-tests.png new file mode 100644 index 000000000..de46fed81 Binary files /dev/null and b/images/examples/integration-tests/mysql-integration-tests.png differ diff --git a/images/examples/integration-tests/postgresql-integration-tests.png b/images/examples/integration-tests/postgresql-integration-tests.png new file mode 100644 index 000000000..b661aecb5 Binary files /dev/null and b/images/examples/integration-tests/postgresql-integration-tests.png differ diff --git a/images/examples/integration-tests/preload-data-to-db.png b/images/examples/integration-tests/preload-data-to-db.png new file mode 100644 index 000000000..bc7762e39 Binary files /dev/null and b/images/examples/integration-tests/preload-data-to-db.png differ diff --git a/images/examples/integration-tests/redis-integration-tests.png 
b/images/examples/integration-tests/redis-integration-tests.png new file mode 100644 index 000000000..67b04e6e2 Binary files /dev/null and b/images/examples/integration-tests/redis-integration-tests.png differ diff --git a/images/examples/nested-pipelines/call-other-pipeline.png b/images/examples/nested-pipelines/call-other-pipeline.png new file mode 100644 index 000000000..dc170913e Binary files /dev/null and b/images/examples/nested-pipelines/call-other-pipeline.png differ diff --git a/images/examples/nomad/.keep b/images/examples/nomad/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/images/examples/nomad/nomad-ci-pipeline.png b/images/examples/nomad/nomad-ci-pipeline.png new file mode 100644 index 000000000..45f9c6761 Binary files /dev/null and b/images/examples/nomad/nomad-ci-pipeline.png differ diff --git a/images/examples/nomad/nomad-ui-deployment.png b/images/examples/nomad/nomad-ui-deployment.png new file mode 100644 index 000000000..bf1b9736d Binary files /dev/null and b/images/examples/nomad/nomad-ui-deployment.png differ diff --git a/images/examples/nomad/nomad-variables.png b/images/examples/nomad/nomad-variables.png new file mode 100644 index 000000000..98de8aaa2 Binary files /dev/null and b/images/examples/nomad/nomad-variables.png differ diff --git a/images/examples/packer-gcloud/.keep b/images/examples/packer-gcloud/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/images/examples/packer-gcloud/packer-codefresh-pipeline.png b/images/examples/packer-gcloud/packer-codefresh-pipeline.png new file mode 100644 index 000000000..831361ea7 Binary files /dev/null and b/images/examples/packer-gcloud/packer-codefresh-pipeline.png differ diff --git a/images/examples/packer-gcloud/service-account-variable.png b/images/examples/packer-gcloud/service-account-variable.png new file mode 100644 index 000000000..6da5c0c3f Binary files /dev/null and b/images/examples/packer-gcloud/service-account-variable.png differ diff --git a/images/examples/packer-gcloud/web-app-url.png b/images/examples/packer-gcloud/web-app-url.png new file mode 100644 index 000000000..b5e356247 Binary files /dev/null and b/images/examples/packer-gcloud/web-app-url.png differ diff --git a/images/examples/php-file-transfer/pipeline.png b/images/examples/php-file-transfer/pipeline.png new file mode 100644 index 000000000..bc8e1d543 Binary files /dev/null and b/images/examples/php-file-transfer/pipeline.png differ diff --git a/images/examples/php-file-transfer/test-environment.png b/images/examples/php-file-transfer/test-environment.png new file mode 100644 index 000000000..53f6fe805 Binary files /dev/null and b/images/examples/php-file-transfer/test-environment.png differ diff --git a/images/examples/php-file-transfer/variables.png b/images/examples/php-file-transfer/variables.png new file mode 100644 index 000000000..12e59682f Binary files /dev/null and b/images/examples/php-file-transfer/variables.png differ diff --git a/images/examples/pulumi/pulumi-access-token.png b/images/examples/pulumi/pulumi-access-token.png new file mode 100644 index 000000000..b060c7105 Binary files /dev/null and b/images/examples/pulumi/pulumi-access-token.png differ diff --git a/images/examples/pulumi/pulumi-pipeline.png b/images/examples/pulumi/pulumi-pipeline.png new file mode 100644 index 000000000..c685ecae9 Binary files /dev/null and b/images/examples/pulumi/pulumi-pipeline.png differ diff --git a/images/examples/scala/multi-stage-pipeline.png b/images/examples/scala/multi-stage-pipeline.png new file 
mode 100644 index 000000000..fccce93cc Binary files /dev/null and b/images/examples/scala/multi-stage-pipeline.png differ diff --git a/images/examples/scala/pipeline.png b/images/examples/scala/pipeline.png new file mode 100644 index 000000000..0ad5f393f Binary files /dev/null and b/images/examples/scala/pipeline.png differ diff --git a/images/examples/scala/single-stage-pipeline.png b/images/examples/scala/single-stage-pipeline.png new file mode 100644 index 000000000..a6c34e5c0 Binary files /dev/null and b/images/examples/scala/single-stage-pipeline.png differ diff --git a/images/examples/sealed-secrets/add-app.png b/images/examples/sealed-secrets/add-app.png new file mode 100644 index 000000000..e90655be9 Binary files /dev/null and b/images/examples/sealed-secrets/add-app.png differ diff --git a/images/examples/sealed-secrets/app-secrets.png b/images/examples/sealed-secrets/app-secrets.png new file mode 100644 index 000000000..71f7c9055 Binary files /dev/null and b/images/examples/sealed-secrets/app-secrets.png differ diff --git a/images/examples/sealed-secrets/current-state.png b/images/examples/sealed-secrets/current-state.png new file mode 100644 index 000000000..afab12584 Binary files /dev/null and b/images/examples/sealed-secrets/current-state.png differ diff --git a/images/examples/secrets/mozilla-sops-pipeline-vars.png b/images/examples/secrets/mozilla-sops-pipeline-vars.png new file mode 100644 index 000000000..fdbef99cb Binary files /dev/null and b/images/examples/secrets/mozilla-sops-pipeline-vars.png differ diff --git a/images/examples/secrets/mozilla-sops-pipeline.png b/images/examples/secrets/mozilla-sops-pipeline.png new file mode 100644 index 000000000..977e6ac09 Binary files /dev/null and b/images/examples/secrets/mozilla-sops-pipeline.png differ diff --git a/images/examples/secrets/vault-pipeline.png b/images/examples/secrets/vault-pipeline.png new file mode 100644 index 000000000..ce4245ab7 Binary files /dev/null and b/images/examples/secrets/vault-pipeline.png differ diff --git a/images/examples/secrets/vault-pipeline2.png b/images/examples/secrets/vault-pipeline2.png new file mode 100644 index 000000000..3b53a97c9 Binary files /dev/null and b/images/examples/secrets/vault-pipeline2.png differ diff --git a/images/examples/shared-workspace/volume-list.png b/images/examples/shared-workspace/volume-list.png new file mode 100644 index 000000000..dd81b2c7c Binary files /dev/null and b/images/examples/shared-workspace/volume-list.png differ diff --git a/images/examples/terraform/google_cloud_json.png b/images/examples/terraform/google_cloud_json.png new file mode 100644 index 000000000..489c0da24 Binary files /dev/null and b/images/examples/terraform/google_cloud_json.png differ diff --git a/images/examples/terraform/terraform-pipeline.png b/images/examples/terraform/terraform-pipeline.png new file mode 100644 index 000000000..7fa28781e Binary files /dev/null and b/images/examples/terraform/terraform-pipeline.png differ diff --git a/images/examples/unit-tests/fan-in-fan-out-pipeline.png b/images/examples/unit-tests/fan-in-fan-out-pipeline.png new file mode 100644 index 000000000..f4e24eb68 Binary files /dev/null and b/images/examples/unit-tests/fan-in-fan-out-pipeline.png differ diff --git a/images/examples/unit-tests/parallel-pipeline-examples.png b/images/examples/unit-tests/parallel-pipeline-examples.png new file mode 100644 index 000000000..8b1715458 Binary files /dev/null and b/images/examples/unit-tests/parallel-pipeline-examples.png differ diff --git 
a/images/examples/unit-tests/unit-tests-pipeline.png b/images/examples/unit-tests/unit-tests-pipeline.png new file mode 100644 index 000000000..8f21d296e Binary files /dev/null and b/images/examples/unit-tests/unit-tests-pipeline.png differ diff --git a/images/guides/branches-pull-requests/auto-branch-build.png b/images/guides/branches-pull-requests/auto-branch-build.png new file mode 100644 index 000000000..5e486c1c7 Binary files /dev/null and b/images/guides/branches-pull-requests/auto-branch-build.png differ diff --git a/images/guides/branches-pull-requests/branch-step-condition.png b/images/guides/branches-pull-requests/branch-step-condition.png new file mode 100644 index 000000000..1b8a4f05d Binary files /dev/null and b/images/guides/branches-pull-requests/branch-step-condition.png differ diff --git a/images/guides/branches-pull-requests/build-specific-branch.png b/images/guides/branches-pull-requests/build-specific-branch.png new file mode 100644 index 000000000..3d5e31e7e Binary files /dev/null and b/images/guides/branches-pull-requests/build-specific-branch.png differ diff --git a/images/guides/branches-pull-requests/choosing-pr-events.png b/images/guides/branches-pull-requests/choosing-pr-events.png new file mode 100644 index 000000000..e37013560 Binary files /dev/null and b/images/guides/branches-pull-requests/choosing-pr-events.png differ diff --git a/images/guides/branches-pull-requests/feature-pipeline.png b/images/guides/branches-pull-requests/feature-pipeline.png new file mode 100644 index 000000000..f7f7e4c9e Binary files /dev/null and b/images/guides/branches-pull-requests/feature-pipeline.png differ diff --git a/images/guides/branches-pull-requests/git-flow-feature-trigger.png b/images/guides/branches-pull-requests/git-flow-feature-trigger.png new file mode 100644 index 000000000..4daff7629 Binary files /dev/null and b/images/guides/branches-pull-requests/git-flow-feature-trigger.png differ diff --git a/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png b/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png new file mode 100644 index 000000000..0f3557bc7 Binary files /dev/null and b/images/guides/branches-pull-requests/git-flow-release-pipeline-trigger.png differ diff --git a/images/guides/branches-pull-requests/pr-event.png b/images/guides/branches-pull-requests/pr-event.png new file mode 100644 index 000000000..0342ab30f Binary files /dev/null and b/images/guides/branches-pull-requests/pr-event.png differ diff --git a/images/guides/branches-pull-requests/production-pipeline.png b/images/guides/branches-pull-requests/production-pipeline.png new file mode 100644 index 000000000..0fc7a6ee6 Binary files /dev/null and b/images/guides/branches-pull-requests/production-pipeline.png differ diff --git a/images/guides/branches-pull-requests/restrict-branch.png b/images/guides/branches-pull-requests/restrict-branch.png new file mode 100644 index 000000000..74af2035c Binary files /dev/null and b/images/guides/branches-pull-requests/restrict-branch.png differ diff --git a/images/guides/branches-pull-requests/trigger-for-features.png b/images/guides/branches-pull-requests/trigger-for-features.png new file mode 100644 index 000000000..0599cc653 Binary files /dev/null and b/images/guides/branches-pull-requests/trigger-for-features.png differ diff --git a/images/guides/branches-pull-requests/trigger-for-production-pipeline.png b/images/guides/branches-pull-requests/trigger-for-production-pipeline.png new file mode 100644 index 
000000000..8ce454489 Binary files /dev/null and b/images/guides/branches-pull-requests/trigger-for-production-pipeline.png differ diff --git a/images/guides/branches-pull-requests/trunk-based-development.png b/images/guides/branches-pull-requests/trunk-based-development.png new file mode 100644 index 000000000..a311d046a Binary files /dev/null and b/images/guides/branches-pull-requests/trunk-based-development.png differ diff --git a/images/guides/build-docker-images/automatic-docker-push.png b/images/guides/build-docker-images/automatic-docker-push.png new file mode 100644 index 000000000..0a4d45b5d Binary files /dev/null and b/images/guides/build-docker-images/automatic-docker-push.png differ diff --git a/images/guides/build-docker-images/docker-image-dashboard.png b/images/guides/build-docker-images/docker-image-dashboard.png new file mode 100644 index 000000000..4142b8113 Binary files /dev/null and b/images/guides/build-docker-images/docker-image-dashboard.png differ diff --git a/images/guides/build-docker-images/multi-stage-pipeline.png b/images/guides/build-docker-images/multi-stage-pipeline.png new file mode 100644 index 000000000..66ff7e796 Binary files /dev/null and b/images/guides/build-docker-images/multi-stage-pipeline.png differ diff --git a/images/guides/build-docker-images/non-multi-stage-pipeline.png b/images/guides/build-docker-images/non-multi-stage-pipeline.png new file mode 100644 index 000000000..9bba91865 Binary files /dev/null and b/images/guides/build-docker-images/non-multi-stage-pipeline.png differ diff --git a/images/guides/build-docker-images/package-only-pipeline.png b/images/guides/build-docker-images/package-only-pipeline.png new file mode 100644 index 000000000..7739db0a7 Binary files /dev/null and b/images/guides/build-docker-images/package-only-pipeline.png differ diff --git a/images/guides/config-maps/add-new-single-variable.png b/images/guides/config-maps/add-new-single-variable.png new file mode 100644 index 000000000..bf67b731c Binary files /dev/null and b/images/guides/config-maps/add-new-single-variable.png differ diff --git a/images/guides/config-maps/change-view.png b/images/guides/config-maps/change-view.png new file mode 100644 index 000000000..6f5cd64e5 Binary files /dev/null and b/images/guides/config-maps/change-view.png differ diff --git a/images/guides/config-maps/edit-remove-config-map-variables.png b/images/guides/config-maps/edit-remove-config-map-variables.png new file mode 100644 index 000000000..da9f4db42 Binary files /dev/null and b/images/guides/config-maps/edit-remove-config-map-variables.png differ diff --git a/images/guides/config-maps/import-variables-from-text.png b/images/guides/config-maps/import-variables-from-text.png new file mode 100644 index 000000000..a92d2c987 Binary files /dev/null and b/images/guides/config-maps/import-variables-from-text.png differ diff --git a/images/guides/config-maps/manage-maps-namespace.png b/images/guides/config-maps/manage-maps-namespace.png new file mode 100644 index 000000000..520d1d0c9 Binary files /dev/null and b/images/guides/config-maps/manage-maps-namespace.png differ diff --git a/images/guides/config-maps/new-config-map-settings.png b/images/guides/config-maps/new-config-map-settings.png new file mode 100644 index 000000000..42251d08e Binary files /dev/null and b/images/guides/config-maps/new-config-map-settings.png differ diff --git a/images/guides/config-maps/select-cluster-namespace.png b/images/guides/config-maps/select-cluster-namespace.png new file mode 100644 index 
000000000..ef759f310 Binary files /dev/null and b/images/guides/config-maps/select-cluster-namespace.png differ diff --git a/images/guides/environments/board.png b/images/guides/environments/board.png new file mode 100644 index 000000000..bbd35458a Binary files /dev/null and b/images/guides/environments/board.png differ diff --git a/images/guides/environments/environments.png b/images/guides/environments/environments.png new file mode 100644 index 000000000..2ad21c0dd Binary files /dev/null and b/images/guides/environments/environments.png differ diff --git a/images/guides/gitops/app-of-apps-closed.png b/images/guides/gitops/app-of-apps-closed.png new file mode 100644 index 000000000..821365c79 Binary files /dev/null and b/images/guides/gitops/app-of-apps-closed.png differ diff --git a/images/guides/gitops/app-of-apps.png b/images/guides/gitops/app-of-apps.png new file mode 100644 index 000000000..c3d3538ae Binary files /dev/null and b/images/guides/gitops/app-of-apps.png differ diff --git a/images/guides/gitops/argo-application-name.png b/images/guides/gitops/argo-application-name.png new file mode 100644 index 000000000..b2da15a51 Binary files /dev/null and b/images/guides/gitops/argo-application-name.png differ diff --git a/images/guides/gitops/argo-context.png b/images/guides/gitops/argo-context.png new file mode 100644 index 000000000..954ef3a35 Binary files /dev/null and b/images/guides/gitops/argo-context.png differ diff --git a/images/guides/gitops/argo-sync-pipeline.png b/images/guides/gitops/argo-sync-pipeline.png new file mode 100644 index 000000000..da692f2d7 Binary files /dev/null and b/images/guides/gitops/argo-sync-pipeline.png differ diff --git a/images/guides/gitops/basic-ci-pipeline.png b/images/guides/gitops/basic-ci-pipeline.png new file mode 100644 index 000000000..180d8acef Binary files /dev/null and b/images/guides/gitops/basic-ci-pipeline.png differ diff --git a/images/guides/gitops/ci-cd-pipeline.png b/images/guides/gitops/ci-cd-pipeline.png new file mode 100644 index 000000000..44aa534a1 Binary files /dev/null and b/images/guides/gitops/ci-cd-pipeline.png differ diff --git a/images/guides/gitops/currentstate.png b/images/guides/gitops/currentstate.png new file mode 100644 index 000000000..dc8700078 Binary files /dev/null and b/images/guides/gitops/currentstate.png differ diff --git a/images/guides/gitops/dashboard.png b/images/guides/gitops/dashboard.png new file mode 100644 index 000000000..caf9b8323 Binary files /dev/null and b/images/guides/gitops/dashboard.png differ diff --git a/images/guides/gitops/disable-auto-sync.png b/images/guides/gitops/disable-auto-sync.png new file mode 100644 index 000000000..312ca3765 Binary files /dev/null and b/images/guides/gitops/disable-auto-sync.png differ diff --git a/images/guides/gitops/filter.png b/images/guides/gitops/filter.png new file mode 100644 index 000000000..6d484b5b6 Binary files /dev/null and b/images/guides/gitops/filter.png differ diff --git a/images/guides/gitops/gitops-applications.png b/images/guides/gitops/gitops-applications.png new file mode 100644 index 000000000..dfa558317 Binary files /dev/null and b/images/guides/gitops/gitops-applications.png differ diff --git a/images/guides/gitops/gitops-dashboard.png b/images/guides/gitops/gitops-dashboard.png new file mode 100644 index 000000000..0535ce772 Binary files /dev/null and b/images/guides/gitops/gitops-dashboard.png differ diff --git a/images/guides/gitops/gitops-environment.png b/images/guides/gitops/gitops-environment.png new file mode 100644 index 
000000000..19b8989bf Binary files /dev/null and b/images/guides/gitops/gitops-environment.png differ diff --git a/images/guides/gitops/gitops-workflow.png b/images/guides/gitops/gitops-workflow.png new file mode 100644 index 000000000..2ccba0abe Binary files /dev/null and b/images/guides/gitops/gitops-workflow.png differ diff --git a/images/guides/gitops/image-annotations.png b/images/guides/gitops/image-annotations.png new file mode 100644 index 000000000..b1d63d934 Binary files /dev/null and b/images/guides/gitops/image-annotations.png differ diff --git a/images/guides/gitops/out-of-sync.png b/images/guides/gitops/out-of-sync.png new file mode 100644 index 000000000..a086d528e Binary files /dev/null and b/images/guides/gitops/out-of-sync.png differ diff --git a/images/guides/gitops/pipeline-from-git.png b/images/guides/gitops/pipeline-from-git.png new file mode 100644 index 000000000..582787fbb Binary files /dev/null and b/images/guides/gitops/pipeline-from-git.png differ diff --git a/images/guides/gitops/real-dashboard.png b/images/guides/gitops/real-dashboard.png new file mode 100644 index 000000000..ecee9a049 Binary files /dev/null and b/images/guides/gitops/real-dashboard.png differ diff --git a/images/guides/gitops/rollback.png b/images/guides/gitops/rollback.png new file mode 100644 index 000000000..93099de84 Binary files /dev/null and b/images/guides/gitops/rollback.png differ diff --git a/images/guides/gitops/search-history.png b/images/guides/gitops/search-history.png new file mode 100644 index 000000000..968655434 Binary files /dev/null and b/images/guides/gitops/search-history.png differ diff --git a/images/guides/gitops/search.png b/images/guides/gitops/search.png new file mode 100644 index 000000000..1bcc0a784 Binary files /dev/null and b/images/guides/gitops/search.png differ diff --git a/images/guides/gitops/tooltips.png b/images/guides/gitops/tooltips.png new file mode 100644 index 000000000..0c0d380cb Binary files /dev/null and b/images/guides/gitops/tooltips.png differ diff --git a/images/guides/gitops/updated-apps.png b/images/guides/gitops/updated-apps.png new file mode 100644 index 000000000..75fac53e7 Binary files /dev/null and b/images/guides/gitops/updated-apps.png differ diff --git a/images/guides/gitops/updated-services.png b/images/guides/gitops/updated-services.png new file mode 100644 index 000000000..3e610d362 Binary files /dev/null and b/images/guides/gitops/updated-services.png differ diff --git a/images/guides/helm-best-practices/advanced-promote.png b/images/guides/helm-best-practices/advanced-promote.png new file mode 100644 index 000000000..b8f53b29a Binary files /dev/null and b/images/guides/helm-best-practices/advanced-promote.png differ diff --git a/images/guides/helm-best-practices/basic-helm-pipeline.png b/images/guides/helm-best-practices/basic-helm-pipeline.png new file mode 100644 index 000000000..828930230 Binary files /dev/null and b/images/guides/helm-best-practices/basic-helm-pipeline.png differ diff --git a/images/guides/helm-best-practices/board.png b/images/guides/helm-best-practices/board.png new file mode 100644 index 000000000..bbd35458a Binary files /dev/null and b/images/guides/helm-best-practices/board.png differ diff --git a/images/guides/helm-best-practices/chart-structure.png b/images/guides/helm-best-practices/chart-structure.png new file mode 100644 index 000000000..d3559a525 Binary files /dev/null and b/images/guides/helm-best-practices/chart-structure.png differ diff --git 
a/images/guides/helm-best-practices/chart-structure.svg b/images/guides/helm-best-practices/chart-structure.svg new file mode 100644 index 000000000..102abb818 --- /dev/null +++ b/images/guides/helm-best-practices/chart-structure.svg @@ -0,0 +1,347 @@ + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + +   + image1 + + + image2 + + image3 + + Single Chart + +   + image1 + + + image2 + + + + image3 + + + Umbrella Chart + + + + Chart + Chart + + + + + diff --git a/images/guides/helm-best-practices/chart-version-multiple.png b/images/guides/helm-best-practices/chart-version-multiple.png new file mode 100644 index 000000000..2139d8caf Binary files /dev/null and b/images/guides/helm-best-practices/chart-version-multiple.png differ diff --git a/images/guides/helm-best-practices/chart-version-multiple.svg b/images/guides/helm-best-practices/chart-version-multiple.svg new file mode 100644 index 000000000..c387b7b8c --- /dev/null +++ b/images/guides/helm-best-practices/chart-version-multiple.svg @@ -0,0 +1,439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + +   + image A - 1.1 + + image b - 4.3 + + image c - 2.6 + + Chart 1.1 + +   + image A - 1.1 + + image b - 4.3 + + image c - 2.6 + + Chart 1.2 + + + chartbump +   + image A - 1.1 + + image b - 4.4 + + image c - 2.7 + + Chart 1.2 + + + appbump + + diff --git a/images/guides/helm-best-practices/chart-version-single.png b/images/guides/helm-best-practices/chart-version-single.png new file mode 100644 index 000000000..fe6eb44c8 Binary files /dev/null and b/images/guides/helm-best-practices/chart-version-single.png differ diff --git a/images/guides/helm-best-practices/chart-version-single.svg b/images/guides/helm-best-practices/chart-version-single.svg new file mode 100644 index 000000000..0661a841c --- /dev/null +++ b/images/guides/helm-best-practices/chart-version-single.svg @@ -0,0 +1,320 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + +   + image A - 1.1 + + image b - 1.1 + + image c - 1.1 + + Chart 1.1 + +   + image A - 1.2 + + image b - 1.2 + + image c - 1.2 + + Chart 1.2 + + + new version + + diff --git a/images/guides/helm-best-practices/helm-catalog.png b/images/guides/helm-best-practices/helm-catalog.png new file mode 100644 index 000000000..f9db55d50 Binary files /dev/null and b/images/guides/helm-best-practices/helm-catalog.png differ diff --git a/images/guides/helm-best-practices/helm-direct-deployment.png b/images/guides/helm-best-practices/helm-direct-deployment.png new file mode 100644 index 000000000..a6aaba1ca Binary files /dev/null and b/images/guides/helm-best-practices/helm-direct-deployment.png differ diff --git a/images/guides/helm-best-practices/helm-no-repo.png b/images/guides/helm-best-practices/helm-no-repo.png new file mode 100644 index 000000000..db3d67ae0 Binary files /dev/null and b/images/guides/helm-best-practices/helm-no-repo.png differ diff --git a/images/guides/helm-best-practices/helm-only-store.png b/images/guides/helm-best-practices/helm-only-store.png new file mode 100644 index 000000000..26f72d40b Binary files /dev/null and b/images/guides/helm-best-practices/helm-only-store.png differ diff --git a/images/guides/helm-best-practices/helm-rollback.png b/images/guides/helm-best-practices/helm-rollback.png new file mode 100644 index 000000000..c337fe0f6 Binary files /dev/null and b/images/guides/helm-best-practices/helm-rollback.png differ diff --git a/images/guides/helm-best-practices/multiple-environments.png 
b/images/guides/helm-best-practices/multiple-environments.png new file mode 100644 index 000000000..1d55a28ab Binary files /dev/null and b/images/guides/helm-best-practices/multiple-environments.png differ diff --git a/images/guides/helm-best-practices/multiple-promotion.svg b/images/guides/helm-best-practices/multiple-promotion.svg new file mode 100644 index 000000000..01a939664 --- /dev/null +++ b/images/guides/helm-best-practices/multiple-promotion.svg @@ -0,0 +1,439 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + +   + image A - 1.1 + + image b - 4.3 + + image c - 2.6 + + Chart 1.1 + +   + image A - 1.1 + + image b - 4.3 + + image c - 2.6 + + Chart 1.2 + + + chartbump +   + image A - 1.1 + + image b - 4.4 + + image c - 2.7 + + Chart 1.2 + + + appbump + + diff --git a/images/guides/helm-best-practices/promote-1.png b/images/guides/helm-best-practices/promote-1.png new file mode 100644 index 000000000..622137fb0 Binary files /dev/null and b/images/guides/helm-best-practices/promote-1.png differ diff --git a/images/guides/helm-best-practices/promote-2.png b/images/guides/helm-best-practices/promote-2.png new file mode 100644 index 000000000..9ee567f1c Binary files /dev/null and b/images/guides/helm-best-practices/promote-2.png differ diff --git a/images/guides/helm-best-practices/push-and-deploy.png b/images/guides/helm-best-practices/push-and-deploy.png new file mode 100644 index 000000000..7efbb2e92 Binary files /dev/null and b/images/guides/helm-best-practices/push-and-deploy.png differ diff --git a/images/guides/helm-best-practices/value-options.png b/images/guides/helm-best-practices/value-options.png new file mode 100644 index 000000000..5ce6820f5 Binary files /dev/null and b/images/guides/helm-best-practices/value-options.png differ diff --git a/images/guides/kubernetes/create-secret.png b/images/guides/kubernetes/create-secret.png new file mode 100644 index 000000000..2e801c1d5 Binary files /dev/null and b/images/guides/kubernetes/create-secret.png differ diff --git a/images/guides/kubernetes/secret-dropdown.png b/images/guides/kubernetes/secret-dropdown.png new file mode 100644 index 000000000..ac49669de Binary files /dev/null and b/images/guides/kubernetes/secret-dropdown.png differ diff --git a/images/guides/microservices/.keep b/images/guides/microservices/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/images/guides/microservices/add-new-microservice.png b/images/guides/microservices/add-new-microservice.png new file mode 100644 index 000000000..58b965bc7 Binary files /dev/null and b/images/guides/microservices/add-new-microservice.png differ diff --git a/images/guides/microservices/add-new-microservice.svg b/images/guides/microservices/add-new-microservice.svg new file mode 100644 index 000000000..fd393f6bd --- /dev/null +++ b/images/guides/microservices/add-new-microservice.svg @@ -0,0 +1,1348 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + +   + step + + +   + + + Pipeline + + +   + step + + + +   + step + + + + Trigger 1 + + + + + Trigger 2 + + + + Trigger 3 + + + + Trigger 4 + + + + + + + Microservice N + + Trigger 
N + + + + diff --git a/images/guides/microservices/microservice-pipelines.png b/images/guides/microservices/microservice-pipelines.png new file mode 100644 index 000000000..4a8f5119b Binary files /dev/null and b/images/guides/microservices/microservice-pipelines.png differ diff --git a/images/guides/microservices/microservice-pipelines.svg b/images/guides/microservices/microservice-pipelines.svg new file mode 100644 index 000000000..c254d191d --- /dev/null +++ b/images/guides/microservices/microservice-pipelines.svg @@ -0,0 +1,1479 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + +   + + Monolithic application + Application split to 4 microservices + +   + step + + +   + + + Pipeline + + +   + step + + + +   + step + + + + Trigger 1 + + + + + Trigger 2 + + + + Trigger 3 + + + + Trigger 4 + + + + + + + diff --git a/images/guides/microservices/monolithic-pipelines.png b/images/guides/microservices/monolithic-pipelines.png new file mode 100644 index 000000000..c3e2c59b8 Binary files /dev/null and b/images/guides/microservices/monolithic-pipelines.png differ diff --git a/images/guides/microservices/monolithic-pipelines.svg b/images/guides/microservices/monolithic-pipelines.svg new file mode 100644 index 000000000..22153d418 --- /dev/null +++ b/images/guides/microservices/monolithic-pipelines.svg @@ -0,0 +1,859 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + +   + step + + +   + Git Repo 1 + + + + Pipeline 1 + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo 2 + + + + Pipeline 2 + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo 3 + + + + Pipeline 3 + + + +   + step + + + +   + step + + + + diff --git a/images/guides/microservices/moving-to-microservices.png b/images/guides/microservices/moving-to-microservices.png new file mode 100644 index 000000000..246f12fd3 Binary files /dev/null and b/images/guides/microservices/moving-to-microservices.png differ diff --git a/images/guides/microservices/moving-to-microservices.svg b/images/guides/microservices/moving-to-microservices.svg new file mode 100644 index 000000000..79c30c576 --- /dev/null +++ b/images/guides/microservices/moving-to-microservices.svg @@ -0,0 +1,7088 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   
+ step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + +   + step + + +   + Git Repo + + + + Pipeline + + + +   + step + + + +   + step + + + + 3 monolithic applications + Each application split to 4 microservices + + diff --git a/images/guides/microservices/multiple-triggers.png b/images/guides/microservices/multiple-triggers.png new file mode 100644 index 000000000..cb5fea257 Binary files /dev/null and b/images/guides/microservices/multiple-triggers.png differ diff --git a/images/guides/microservices/shared-pipelines.png b/images/guides/microservices/shared-pipelines.png new file mode 100644 index 000000000..0818f5843 Binary files /dev/null and 
b/images/guides/microservices/shared-pipelines.png differ diff --git a/images/guides/microservices/shared-pipelines.svg b/images/guides/microservices/shared-pipelines.svg new file mode 100644 index 000000000..c1bb66acf --- /dev/null +++ b/images/guides/microservices/shared-pipelines.svg @@ -0,0 +1,1545 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + +   + library A + +   + + +   + + + Sharedpipelinesegments +   + library B + +   + library C + +   + library D + + + Pipeline 1 + Pipeline 2 + Pipeline 3 +   + library A + + +   + library C + +   + library B + +   + library C + +   + library A + +   + library D + +   + library B + + + + + + +   + custom + +   + custom + +   + custom + + + diff --git a/images/guides/microservices/single-pipeline.png b/images/guides/microservices/single-pipeline.png new file mode 100644 index 000000000..bf6526a6a Binary files /dev/null and b/images/guides/microservices/single-pipeline.png differ diff --git a/images/guides/preview-environments/close-events.png b/images/guides/preview-environments/close-events.png new file mode 100644 index 000000000..dbc19a574 Binary files /dev/null and b/images/guides/preview-environments/close-events.png differ diff --git a/images/guides/preview-environments/demo-path.png b/images/guides/preview-environments/demo-path.png new file mode 100644 index 000000000..22804d64e Binary files /dev/null and b/images/guides/preview-environments/demo-path.png differ diff --git a/images/guides/preview-environments/dynamic-environments.png b/images/guides/preview-environments/dynamic-environments.png new file mode 100644 index 000000000..6db58cdb2 Binary files /dev/null and b/images/guides/preview-environments/dynamic-environments.png differ diff --git a/images/guides/preview-environments/pr-events.png b/images/guides/preview-environments/pr-events.png new file mode 100644 index 000000000..7ff5e67f2 Binary files /dev/null and b/images/guides/preview-environments/pr-events.png differ diff --git a/images/guides/preview-environments/pull-request-closed-pipeline.png b/images/guides/preview-environments/pull-request-closed-pipeline.png new file mode 100644 index 000000000..e4323fdd4 Binary files /dev/null and b/images/guides/preview-environments/pull-request-closed-pipeline.png differ diff --git a/images/guides/preview-environments/pull-request-comment.png b/images/guides/preview-environments/pull-request-comment.png new file mode 100644 index 000000000..bb4e9d3a5 Binary files /dev/null and b/images/guides/preview-environments/pull-request-comment.png differ diff --git a/images/guides/preview-environments/pull-request-preview-pipeline.png b/images/guides/preview-environments/pull-request-preview-pipeline.png new file mode 100644 index 000000000..beb6aff04 Binary files /dev/null and b/images/guides/preview-environments/pull-request-preview-pipeline.png differ diff --git a/images/guides/preview-environments/static-environments.png b/images/guides/preview-environments/static-environments.png new file mode 100644 index 000000000..54af5b138 Binary files /dev/null and b/images/guides/preview-environments/static-environments.png differ diff --git 
a/images/guides/progressive-delivery/01_canary_initial_state.png b/images/guides/progressive-delivery/01_canary_initial_state.png new file mode 100644 index 000000000..8349a9013 Binary files /dev/null and b/images/guides/progressive-delivery/01_canary_initial_state.png differ diff --git a/images/guides/progressive-delivery/01_initial.png b/images/guides/progressive-delivery/01_initial.png new file mode 100644 index 000000000..592e051c5 Binary files /dev/null and b/images/guides/progressive-delivery/01_initial.png differ diff --git a/images/guides/progressive-delivery/02_canary_10.png b/images/guides/progressive-delivery/02_canary_10.png new file mode 100644 index 000000000..ff61e8591 Binary files /dev/null and b/images/guides/progressive-delivery/02_canary_10.png differ diff --git a/images/guides/progressive-delivery/02_two_colors.png b/images/guides/progressive-delivery/02_two_colors.png new file mode 100644 index 000000000..aaae6e7a3 Binary files /dev/null and b/images/guides/progressive-delivery/02_two_colors.png differ diff --git a/images/guides/progressive-delivery/03_canary_33.png b/images/guides/progressive-delivery/03_canary_33.png new file mode 100644 index 000000000..084d41fb9 Binary files /dev/null and b/images/guides/progressive-delivery/03_canary_33.png differ diff --git a/images/guides/progressive-delivery/03_switch_traffic.png b/images/guides/progressive-delivery/03_switch_traffic.png new file mode 100644 index 000000000..b271548cf Binary files /dev/null and b/images/guides/progressive-delivery/03_switch_traffic.png differ diff --git a/images/guides/progressive-delivery/04_canary_finished.png b/images/guides/progressive-delivery/04_canary_finished.png new file mode 100644 index 000000000..8feac3f69 Binary files /dev/null and b/images/guides/progressive-delivery/04_canary_finished.png differ diff --git a/images/guides/progressive-delivery/04_scale_down.png b/images/guides/progressive-delivery/04_scale_down.png new file mode 100644 index 000000000..32628ccd3 Binary files /dev/null and b/images/guides/progressive-delivery/04_scale_down.png differ diff --git a/images/guides/progressive-delivery/approval-pipeline.png b/images/guides/progressive-delivery/approval-pipeline.png new file mode 100644 index 000000000..81b16633a Binary files /dev/null and b/images/guides/progressive-delivery/approval-pipeline.png differ diff --git a/images/guides/progressive-delivery/blue-green.svg b/images/guides/progressive-delivery/blue-green.svg new file mode 100644 index 000000000..b3a1a9c32 --- /dev/null +++ b/images/guides/progressive-delivery/blue-green.svg @@ -0,0 +1,1171 @@
+ [SVG diagram labels: blue/green deployment stages "1- Initial version", "2- New version deployed", "3- Switch Traffic", "4- Finish"; elements "Application Version 34", "Application Version 35", "Users", "Load Balancer", "Live traffic"]
diff --git a/images/guides/progressive-delivery/canary-decision.png b/images/guides/progressive-delivery/canary-decision.png new file mode 100644 index
000000000..49bb34b5c Binary files /dev/null and b/images/guides/progressive-delivery/canary-decision.png differ diff --git a/images/guides/progressive-delivery/canary-manual-approval-pipeline.png b/images/guides/progressive-delivery/canary-manual-approval-pipeline.png new file mode 100644 index 000000000..4cf14038f Binary files /dev/null and b/images/guides/progressive-delivery/canary-manual-approval-pipeline.png differ diff --git a/images/guides/progressive-delivery/canary-metrics-pipeline.png b/images/guides/progressive-delivery/canary-metrics-pipeline.png new file mode 100644 index 000000000..584254b31 Binary files /dev/null and b/images/guides/progressive-delivery/canary-metrics-pipeline.png differ diff --git a/images/guides/progressive-delivery/canary-metrics.png b/images/guides/progressive-delivery/canary-metrics.png new file mode 100644 index 000000000..f392d0154 Binary files /dev/null and b/images/guides/progressive-delivery/canary-metrics.png differ diff --git a/images/guides/progressive-delivery/canary-traffic-split.png b/images/guides/progressive-delivery/canary-traffic-split.png new file mode 100644 index 000000000..e6a86a824 Binary files /dev/null and b/images/guides/progressive-delivery/canary-traffic-split.png differ diff --git a/images/guides/progressive-delivery/canary-watch-metrics.png b/images/guides/progressive-delivery/canary-watch-metrics.png new file mode 100644 index 000000000..980254357 Binary files /dev/null and b/images/guides/progressive-delivery/canary-watch-metrics.png differ diff --git a/images/guides/progressive-delivery/canary-watch.png b/images/guides/progressive-delivery/canary-watch.png new file mode 100644 index 000000000..8ec00c6b7 Binary files /dev/null and b/images/guides/progressive-delivery/canary-watch.png differ diff --git a/images/guides/progressive-delivery/canary.svg b/images/guides/progressive-delivery/canary.svg new file mode 100644 index 000000000..9fa54aafc --- /dev/null +++ b/images/guides/progressive-delivery/canary.svg @@ -0,0 +1,1259 @@
+ [SVG diagram labels: canary stages "1- Initial version", "2- New version used by 10% of users", "3- New version used by 33% of users", "4- New version is used by all users"; traffic splits 10%/90%, 33%/66%, 100%; elements "Application Version 34", "Application Version 35", "Users", "Load Balancer", "Live traffic"]
diff --git a/images/guides/progressive-delivery/graphana-dashboard.png b/images/guides/progressive-delivery/graphana-dashboard.png new file mode 100644 index 000000000..831894b8b Binary files /dev/null and b/images/guides/progressive-delivery/graphana-dashboard.png differ diff --git a/images/guides/progressive-delivery/how-blue-green-works.png b/images/guides/progressive-delivery/how-blue-green-works.png new file mode 100644 index 000000000..c3d1d1757 Binary files /dev/null and b/images/guides/progressive-delivery/how-blue-green-works.png differ diff --git a/images/guides/progressive-delivery/how-canary-deployments-work.png
b/images/guides/progressive-delivery/how-canary-deployments-work.png new file mode 100644 index 000000000..473f710cc Binary files /dev/null and b/images/guides/progressive-delivery/how-canary-deployments-work.png differ diff --git a/images/guides/progressive-delivery/monitor-argo-rollouts.png b/images/guides/progressive-delivery/monitor-argo-rollouts.png new file mode 100644 index 000000000..02187a728 Binary files /dev/null and b/images/guides/progressive-delivery/monitor-argo-rollouts.png differ diff --git a/images/guides/progressive-delivery/monitor-rollout.png b/images/guides/progressive-delivery/monitor-rollout.png new file mode 100644 index 000000000..99eba43ef Binary files /dev/null and b/images/guides/progressive-delivery/monitor-rollout.png differ diff --git a/images/guides/progressive-delivery/smoke-tests-pipeline.png b/images/guides/progressive-delivery/smoke-tests-pipeline.png new file mode 100644 index 000000000..690f5790f Binary files /dev/null and b/images/guides/progressive-delivery/smoke-tests-pipeline.png differ diff --git a/images/guides/promotion/deployment-dashboard.png b/images/guides/promotion/deployment-dashboard.png new file mode 100644 index 000000000..c60079e0a Binary files /dev/null and b/images/guides/promotion/deployment-dashboard.png differ diff --git a/images/guides/promotion/different-settings.png b/images/guides/promotion/different-settings.png new file mode 100644 index 000000000..5d12b5710 Binary files /dev/null and b/images/guides/promotion/different-settings.png differ diff --git a/images/guides/promotion/helm-releases.png b/images/guides/promotion/helm-releases.png new file mode 100644 index 000000000..8744a7849 Binary files /dev/null and b/images/guides/promotion/helm-releases.png differ diff --git a/images/guides/promotion/helm-values.png b/images/guides/promotion/helm-values.png new file mode 100644 index 000000000..909105f18 Binary files /dev/null and b/images/guides/promotion/helm-values.png differ diff --git a/images/guides/promotion/history.png b/images/guides/promotion/history.png new file mode 100644 index 000000000..c87f6ce51 Binary files /dev/null and b/images/guides/promotion/history.png differ diff --git a/images/guides/promotion/image-promotion.png b/images/guides/promotion/image-promotion.png new file mode 100644 index 000000000..e5eacc1ab Binary files /dev/null and b/images/guides/promotion/image-promotion.png differ diff --git a/images/guides/promotion/non-production-deployment.png b/images/guides/promotion/non-production-deployment.png new file mode 100644 index 000000000..f65c0c243 Binary files /dev/null and b/images/guides/promotion/non-production-deployment.png differ diff --git a/images/guides/promotion/production-deployment.png b/images/guides/promotion/production-deployment.png new file mode 100644 index 000000000..179a4502c Binary files /dev/null and b/images/guides/promotion/production-deployment.png differ diff --git a/images/guides/promotion/production-pipeline.png b/images/guides/promotion/production-pipeline.png new file mode 100644 index 000000000..235b7ad17 Binary files /dev/null and b/images/guides/promotion/production-pipeline.png differ diff --git a/images/guides/promotion/services.png b/images/guides/promotion/services.png new file mode 100644 index 000000000..379218535 Binary files /dev/null and b/images/guides/promotion/services.png differ diff --git a/images/guides/promotion/staging-pipeline.png b/images/guides/promotion/staging-pipeline.png new file mode 100644 index 000000000..b73f3cecd Binary files /dev/null 
and b/images/guides/promotion/staging-pipeline.png differ diff --git a/images/guides/promotion/with-approval.png b/images/guides/promotion/with-approval.png new file mode 100644 index 000000000..b460f3f5d Binary files /dev/null and b/images/guides/promotion/with-approval.png differ diff --git a/images/guides/working-with-images/docker-image-promotion.png b/images/guides/working-with-images/docker-image-promotion.png new file mode 100644 index 000000000..43e98f354 Binary files /dev/null and b/images/guides/working-with-images/docker-image-promotion.png differ diff --git a/images/guides/working-with-images/docker-registry-filters.png b/images/guides/working-with-images/docker-registry-filters.png new file mode 100644 index 000000000..796cc91f1 Binary files /dev/null and b/images/guides/working-with-images/docker-registry-filters.png differ diff --git a/images/guides/working-with-images/docker-registry-list.png b/images/guides/working-with-images/docker-registry-list.png new file mode 100644 index 000000000..fb3237f7c Binary files /dev/null and b/images/guides/working-with-images/docker-registry-list.png differ diff --git a/images/guides/working-with-images/image-dashboard-tag.png b/images/guides/working-with-images/image-dashboard-tag.png new file mode 100644 index 000000000..1b1334269 Binary files /dev/null and b/images/guides/working-with-images/image-dashboard-tag.png differ diff --git a/images/guides/working-with-images/linked-docker-registries.png b/images/guides/working-with-images/linked-docker-registries.png new file mode 100644 index 000000000..efa281453 Binary files /dev/null and b/images/guides/working-with-images/linked-docker-registries.png differ diff --git a/images/guides/working-with-images/primary-dockerhub.png b/images/guides/working-with-images/primary-dockerhub.png new file mode 100644 index 000000000..f504aeaf1 Binary files /dev/null and b/images/guides/working-with-images/primary-dockerhub.png differ diff --git a/images/guides/working-with-images/pull-private-image.png b/images/guides/working-with-images/pull-private-image.png new file mode 100644 index 000000000..917622093 Binary files /dev/null and b/images/guides/working-with-images/pull-private-image.png differ diff --git a/images/guides/working-with-images/pull-public-image.png b/images/guides/working-with-images/pull-public-image.png new file mode 100644 index 000000000..ee6888de6 Binary files /dev/null and b/images/guides/working-with-images/pull-public-image.png differ diff --git a/images/guides/working-with-images/registry-prefix.png b/images/guides/working-with-images/registry-prefix.png new file mode 100644 index 000000000..acc312927 Binary files /dev/null and b/images/guides/working-with-images/registry-prefix.png differ diff --git a/images/guides/working-with-images/two-gcr-integrations.png b/images/guides/working-with-images/two-gcr-integrations.png new file mode 100644 index 000000000..b1eddfd2b Binary files /dev/null and b/images/guides/working-with-images/two-gcr-integrations.png differ diff --git a/images/administration/installation/architecture-high-level.png b/images/installation/architecture-high-level.png similarity index 100% rename from images/administration/installation/architecture-high-level.png rename to images/installation/architecture-high-level.png diff --git a/images/administration/installation/codefresh-saas.png b/images/installation/codefresh-saas.png similarity index 100% rename from images/administration/installation/codefresh-saas.png rename to images/installation/codefresh-saas.png 
diff --git a/images/administration/installation/hybrid-installation.png b/images/installation/hybrid-installation.png similarity index 100% rename from images/administration/installation/hybrid-installation.png rename to images/installation/hybrid-installation.png diff --git a/images/installation/soc2-type2-certified.png b/images/installation/soc2-type2-certified.png new file mode 100644 index 000000000..b1e59f37d Binary files /dev/null and b/images/installation/soc2-type2-certified.png differ diff --git a/images/administration/installation/topology-new.png b/images/installation/topology-new.png similarity index 100% rename from images/administration/installation/topology-new.png rename to images/installation/topology-new.png diff --git a/images/administration/installation/topology.png b/images/installation/topology.png similarity index 100% rename from images/administration/installation/topology.png rename to images/installation/topology.png diff --git a/images/integrations/aws/amazon-s3-helm-repo.png b/images/integrations/aws/amazon-s3-helm-repo.png new file mode 100644 index 000000000..be5855ddf Binary files /dev/null and b/images/integrations/aws/amazon-s3-helm-repo.png differ diff --git a/images/integrations/aws/amazon-storage.png b/images/integrations/aws/amazon-storage.png new file mode 100644 index 000000000..66a6ea71a Binary files /dev/null and b/images/integrations/aws/amazon-storage.png differ diff --git a/images/integrations/aws/aws-integration.png b/images/integrations/aws/aws-integration.png new file mode 100644 index 000000000..30fedd8ba Binary files /dev/null and b/images/integrations/aws/aws-integration.png differ diff --git a/images/integrations/datadog/datadog-api-key.png b/images/integrations/datadog/datadog-api-key.png new file mode 100644 index 000000000..8d68009c5 Binary files /dev/null and b/images/integrations/datadog/datadog-api-key.png differ diff --git a/images/integrations/datadog/datadog-config-settings.png b/images/integrations/datadog/datadog-config-settings.png new file mode 100644 index 000000000..f94ba9e3b Binary files /dev/null and b/images/integrations/datadog/datadog-config-settings.png differ diff --git a/images/integrations/datadog/datadog-edit.png b/images/integrations/datadog/datadog-edit.png new file mode 100644 index 000000000..8bfb31325 Binary files /dev/null and b/images/integrations/datadog/datadog-edit.png differ diff --git a/images/integrations/datadog/datadog-pipeline-dashboard.png b/images/integrations/datadog/datadog-pipeline-dashboard.png new file mode 100644 index 000000000..256b48957 Binary files /dev/null and b/images/integrations/datadog/datadog-pipeline-dashboard.png differ diff --git a/images/integrations/datadog/datadog-pipeline-drilldown.png b/images/integrations/datadog/datadog-pipeline-drilldown.png new file mode 100644 index 000000000..15048226a Binary files /dev/null and b/images/integrations/datadog/datadog-pipeline-drilldown.png differ diff --git a/images/integrations/datadog/datadog-pipeline-drilldown.xcf b/images/integrations/datadog/datadog-pipeline-drilldown.xcf new file mode 100644 index 000000000..c920c0190 Binary files /dev/null and b/images/integrations/datadog/datadog-pipeline-drilldown.xcf differ diff --git a/images/integrations/datadog/datadog-pipeline-executions.png b/images/integrations/datadog/datadog-pipeline-executions.png new file mode 100644 index 000000000..9a184ea8a Binary files /dev/null and b/images/integrations/datadog/datadog-pipeline-executions.png differ diff --git 
a/images/integrations/datadog/datadog-pipelines-page.png b/images/integrations/datadog/datadog-pipelines-page.png new file mode 100644 index 000000000..1f01463b8 Binary files /dev/null and b/images/integrations/datadog/datadog-pipelines-page.png differ diff --git a/images/integrations/docker-registries/add-amazon-ecr-registry.png b/images/integrations/docker-registries/add-amazon-ecr-registry.png new file mode 100644 index 000000000..4abb2a306 Binary files /dev/null and b/images/integrations/docker-registries/add-amazon-ecr-registry.png differ diff --git a/images/integrations/docker-registries/add-azure-registry.png b/images/integrations/docker-registries/add-azure-registry.png new file mode 100644 index 000000000..414c6e4fa Binary files /dev/null and b/images/integrations/docker-registries/add-azure-registry.png differ diff --git a/images/integrations/docker-registries/add-bintray-registry.png b/images/integrations/docker-registries/add-bintray-registry.png new file mode 100644 index 000000000..dd748ea3d Binary files /dev/null and b/images/integrations/docker-registries/add-bintray-registry.png differ diff --git a/images/integrations/docker-registries/add-docker-registry.png b/images/integrations/docker-registries/add-docker-registry.png new file mode 100644 index 000000000..a62a1eda4 Binary files /dev/null and b/images/integrations/docker-registries/add-docker-registry.png differ diff --git a/images/integrations/docker-registries/add-gcr-registry.png b/images/integrations/docker-registries/add-gcr-registry.png new file mode 100644 index 000000000..90ea85000 Binary files /dev/null and b/images/integrations/docker-registries/add-gcr-registry.png differ diff --git a/images/integrations/docker-registries/add-other-docker-registry.png b/images/integrations/docker-registries/add-other-docker-registry.png new file mode 100644 index 000000000..e639e086e Binary files /dev/null and b/images/integrations/docker-registries/add-other-docker-registry.png differ diff --git a/images/integrations/docker-registries/add-quay-registry.png b/images/integrations/docker-registries/add-quay-registry.png new file mode 100644 index 000000000..edb3abec4 Binary files /dev/null and b/images/integrations/docker-registries/add-quay-registry.png differ diff --git a/images/integrations/docker-registries/bintray/bintray-api-key.png b/images/integrations/docker-registries/bintray/bintray-api-key.png new file mode 100644 index 000000000..ebe3c5576 Binary files /dev/null and b/images/integrations/docker-registries/bintray/bintray-api-key.png differ diff --git a/images/integrations/docker-registries/bintray/bintray-domain.png b/images/integrations/docker-registries/bintray/bintray-domain.png new file mode 100644 index 000000000..2ad961cd9 Binary files /dev/null and b/images/integrations/docker-registries/bintray/bintray-domain.png differ diff --git a/images/integrations/docker-registries/bintray/bintray-metadata.png b/images/integrations/docker-registries/bintray/bintray-metadata.png new file mode 100644 index 000000000..2fabfcb4e Binary files /dev/null and b/images/integrations/docker-registries/bintray/bintray-metadata.png differ diff --git a/images/integrations/docker-registries/bintray/bintray-set-me-up.png b/images/integrations/docker-registries/bintray/bintray-set-me-up.png new file mode 100644 index 000000000..eb9124c06 Binary files /dev/null and b/images/integrations/docker-registries/bintray/bintray-set-me-up.png differ diff --git a/images/integrations/docker-registries/digital-ocean/codefresh-docker-registry.png 
b/images/integrations/docker-registries/digital-ocean/codefresh-docker-registry.png new file mode 100644 index 000000000..6e5c34275 Binary files /dev/null and b/images/integrations/docker-registries/digital-ocean/codefresh-docker-registry.png differ diff --git a/images/integrations/docker-registries/digital-ocean/codefresh-pipeline.png b/images/integrations/docker-registries/digital-ocean/codefresh-pipeline.png new file mode 100644 index 000000000..63a6864c2 Binary files /dev/null and b/images/integrations/docker-registries/digital-ocean/codefresh-pipeline.png differ diff --git a/images/integrations/docker-registries/digital-ocean/container-registry-do.png b/images/integrations/docker-registries/digital-ocean/container-registry-do.png new file mode 100644 index 000000000..c5a5e31b4 Binary files /dev/null and b/images/integrations/docker-registries/digital-ocean/container-registry-do.png differ diff --git a/images/integrations/docker-registries/digital-ocean/create-registry.png b/images/integrations/docker-registries/digital-ocean/create-registry.png new file mode 100644 index 000000000..0497ab828 Binary files /dev/null and b/images/integrations/docker-registries/digital-ocean/create-registry.png differ diff --git a/images/integrations/docker-registries/dockerhub/add-dockerhub-registry.png b/images/integrations/docker-registries/dockerhub/add-dockerhub-registry.png new file mode 100644 index 000000000..20ac7491b Binary files /dev/null and b/images/integrations/docker-registries/dockerhub/add-dockerhub-registry.png differ diff --git a/images/integrations/docker-registries/dockerhub/pushing-two-dockerhub-accounts.png b/images/integrations/docker-registries/dockerhub/pushing-two-dockerhub-accounts.png new file mode 100644 index 000000000..adf423a26 Binary files /dev/null and b/images/integrations/docker-registries/dockerhub/pushing-two-dockerhub-accounts.png differ diff --git a/images/integrations/docker-registries/dockerhub/two-dockerhub-integrations.png b/images/integrations/docker-registries/dockerhub/two-dockerhub-integrations.png new file mode 100644 index 000000000..ed758e381 Binary files /dev/null and b/images/integrations/docker-registries/dockerhub/two-dockerhub-integrations.png differ diff --git a/images/integrations/docker-registries/ecr/ecr-manual-promote-button.png b/images/integrations/docker-registries/ecr/ecr-manual-promote-button.png new file mode 100644 index 000000000..338c08dd6 Binary files /dev/null and b/images/integrations/docker-registries/ecr/ecr-manual-promote-button.png differ diff --git a/images/integrations/docker-registries/ecr/ecr-manual-promote-repo-name.png b/images/integrations/docker-registries/ecr/ecr-manual-promote-repo-name.png new file mode 100644 index 000000000..047af00bc Binary files /dev/null and b/images/integrations/docker-registries/ecr/ecr-manual-promote-repo-name.png differ diff --git a/images/integrations/docker-registries/ecr/ecr-manual-promote-settings.png b/images/integrations/docker-registries/ecr/ecr-manual-promote-settings.png new file mode 100644 index 000000000..8d52e4291 Binary files /dev/null and b/images/integrations/docker-registries/ecr/ecr-manual-promote-settings.png differ diff --git a/images/integrations/docker-registries/github/github-registry-codefresh.png b/images/integrations/docker-registries/github/github-registry-codefresh.png new file mode 100644 index 000000000..656179f5b Binary files /dev/null and b/images/integrations/docker-registries/github/github-registry-codefresh.png differ diff --git 
a/images/integrations/docker-registries/github/github-registry-pipeline.png b/images/integrations/docker-registries/github/github-registry-pipeline.png new file mode 100644 index 000000000..51c462619 Binary files /dev/null and b/images/integrations/docker-registries/github/github-registry-pipeline.png differ diff --git a/images/integrations/docker-registries/github/manual-docker-push.png b/images/integrations/docker-registries/github/manual-docker-push.png new file mode 100644 index 000000000..74be26a2e Binary files /dev/null and b/images/integrations/docker-registries/github/manual-docker-push.png differ diff --git a/images/integrations/docker-registries/github/multiple-docker-tags.png b/images/integrations/docker-registries/github/multiple-docker-tags.png new file mode 100644 index 000000000..6196243da Binary files /dev/null and b/images/integrations/docker-registries/github/multiple-docker-tags.png differ diff --git a/images/integrations/docker-registries/google-artifact-registry-settings.png b/images/integrations/docker-registries/google-artifact-registry-settings.png new file mode 100644 index 000000000..9faa1ba98 Binary files /dev/null and b/images/integrations/docker-registries/google-artifact-registry-settings.png differ diff --git a/images/integrations/docker-registries/google-gcr-registry-settings.png b/images/integrations/docker-registries/google-gcr-registry-settings.png new file mode 100644 index 000000000..e085a0c1b Binary files /dev/null and b/images/integrations/docker-registries/google-gcr-registry-settings.png differ diff --git a/images/integrations/docker-registries/registry-name.png b/images/integrations/docker-registries/registry-name.png new file mode 100644 index 000000000..3d7b1c9a6 Binary files /dev/null and b/images/integrations/docker-registries/registry-name.png differ diff --git a/images/integrations/github-actions/environment-variables.png b/images/integrations/github-actions/environment-variables.png new file mode 100644 index 000000000..93393c4cd Binary files /dev/null and b/images/integrations/github-actions/environment-variables.png differ diff --git a/images/integrations/github-actions/github-action-pipeline.png b/images/integrations/github-actions/github-action-pipeline.png new file mode 100644 index 000000000..95be64ce5 Binary files /dev/null and b/images/integrations/github-actions/github-action-pipeline.png differ diff --git a/images/integrations/github-actions/github-action-step-browser.png b/images/integrations/github-actions/github-action-step-browser.png new file mode 100644 index 000000000..7bb1f9a5c Binary files /dev/null and b/images/integrations/github-actions/github-action-step-browser.png differ diff --git a/images/integrations/github-actions/github-actions-marketplace.png b/images/integrations/github-actions/github-actions-marketplace.png new file mode 100644 index 000000000..7d2fb0d3f Binary files /dev/null and b/images/integrations/github-actions/github-actions-marketplace.png differ diff --git a/images/integrations/github-actions/select-github-action.png b/images/integrations/github-actions/select-github-action.png new file mode 100644 index 000000000..dc586dfc5 Binary files /dev/null and b/images/integrations/github-actions/select-github-action.png differ diff --git a/images/integrations/github-actions/snyk-action-arguments.png b/images/integrations/github-actions/snyk-action-arguments.png new file mode 100644 index 000000000..cdce79526 Binary files /dev/null and b/images/integrations/github-actions/snyk-action-arguments.png differ diff 
--git a/images/integrations/jenkins/call-a-codefresh-pipeline.png b/images/integrations/jenkins/call-a-codefresh-pipeline.png new file mode 100644 index 000000000..d6d28a5b0 Binary files /dev/null and b/images/integrations/jenkins/call-a-codefresh-pipeline.png differ diff --git a/images/integrations/jenkins/calling-codefresh-from-jenkins.png b/images/integrations/jenkins/calling-codefresh-from-jenkins.png new file mode 100644 index 000000000..e857a46c9 Binary files /dev/null and b/images/integrations/jenkins/calling-codefresh-from-jenkins.png differ diff --git a/images/integrations/jenkins/calling-jenkins-from-codefresh.png b/images/integrations/jenkins/calling-jenkins-from-codefresh.png new file mode 100644 index 000000000..9a65af5a9 Binary files /dev/null and b/images/integrations/jenkins/calling-jenkins-from-codefresh.png differ diff --git a/images/integrations/jenkins/codefresh-logs-from-jenkins.png b/images/integrations/jenkins/codefresh-logs-from-jenkins.png new file mode 100644 index 000000000..4f864c45c Binary files /dev/null and b/images/integrations/jenkins/codefresh-logs-from-jenkins.png differ diff --git a/images/integrations/jenkins/jenkins-api-token.png b/images/integrations/jenkins/jenkins-api-token.png new file mode 100644 index 000000000..d53ab493a Binary files /dev/null and b/images/integrations/jenkins/jenkins-api-token.png differ diff --git a/images/integrations/jenkins/jenkins-credentials.png b/images/integrations/jenkins/jenkins-credentials.png new file mode 100644 index 000000000..46da80590 Binary files /dev/null and b/images/integrations/jenkins/jenkins-credentials.png differ diff --git a/images/integrations/jenkins/jenkins-freestyle-job.png b/images/integrations/jenkins/jenkins-freestyle-job.png new file mode 100644 index 000000000..9b14ce0a9 Binary files /dev/null and b/images/integrations/jenkins/jenkins-freestyle-job.png differ diff --git a/images/integrations/jenkins/jenkins-tool-installation.png b/images/integrations/jenkins/jenkins-tool-installation.png new file mode 100644 index 000000000..e498d2b59 Binary files /dev/null and b/images/integrations/jenkins/jenkins-tool-installation.png differ diff --git a/images/integrations/jenkins/jenkins-variables.png b/images/integrations/jenkins/jenkins-variables.png new file mode 100644 index 000000000..6bcb4e21b Binary files /dev/null and b/images/integrations/jenkins/jenkins-variables.png differ diff --git a/images/integrations/jenkins/migrate-jenkins-pipeline.png b/images/integrations/jenkins/migrate-jenkins-pipeline.png new file mode 100644 index 000000000..db9731f2d Binary files /dev/null and b/images/integrations/jenkins/migrate-jenkins-pipeline.png differ diff --git a/images/integrations/jenkins/pipeline-variables.png b/images/integrations/jenkins/pipeline-variables.png new file mode 100644 index 000000000..27ce44b83 Binary files /dev/null and b/images/integrations/jenkins/pipeline-variables.png differ diff --git a/images/integrations/jenkins/trigger-remote-jenkins-job.png b/images/integrations/jenkins/trigger-remote-jenkins-job.png new file mode 100644 index 000000000..dd1783f77 Binary files /dev/null and b/images/integrations/jenkins/trigger-remote-jenkins-job.png differ diff --git a/images/integrations/jira/account-settings.png b/images/integrations/jira/account-settings.png new file mode 100644 index 000000000..95db987d0 Binary files /dev/null and b/images/integrations/jira/account-settings.png differ diff --git a/images/integrations/jira/add-app.png b/images/integrations/jira/add-app.png new file mode 100644 
index 000000000..d5457759c Binary files /dev/null and b/images/integrations/jira/add-app.png differ diff --git a/images/integrations/jira/add-jira-password.png b/images/integrations/jira/add-jira-password.png new file mode 100644 index 000000000..074bd1e9c Binary files /dev/null and b/images/integrations/jira/add-jira-password.png differ diff --git a/images/integrations/jira/client-key.png b/images/integrations/jira/client-key.png new file mode 100644 index 000000000..99ff95a65 Binary files /dev/null and b/images/integrations/jira/client-key.png differ diff --git a/images/integrations/jira/codefresh-dashboard.png b/images/integrations/jira/codefresh-dashboard.png new file mode 100644 index 000000000..3d9959f37 Binary files /dev/null and b/images/integrations/jira/codefresh-dashboard.png differ diff --git a/images/integrations/jira/codefreshpipeline.png b/images/integrations/jira/codefreshpipeline.png new file mode 100644 index 000000000..b0dedbd61 Binary files /dev/null and b/images/integrations/jira/codefreshpipeline.png differ diff --git a/images/integrations/jira/configure.png b/images/integrations/jira/configure.png new file mode 100644 index 000000000..46e55ade0 Binary files /dev/null and b/images/integrations/jira/configure.png differ diff --git a/images/integrations/jira/confirm.png b/images/integrations/jira/confirm.png new file mode 100644 index 000000000..93c4614f2 Binary files /dev/null and b/images/integrations/jira/confirm.png differ diff --git a/images/integrations/jira/jira-comment.png b/images/integrations/jira/jira-comment.png new file mode 100644 index 000000000..f9747daa6 Binary files /dev/null and b/images/integrations/jira/jira-comment.png differ diff --git a/images/integrations/jira/jira-integration-one.png b/images/integrations/jira/jira-integration-one.png new file mode 100644 index 000000000..0fb770643 Binary files /dev/null and b/images/integrations/jira/jira-integration-one.png differ diff --git a/images/integrations/jira/jira-integration-two.png b/images/integrations/jira/jira-integration-two.png new file mode 100644 index 000000000..5def27440 Binary files /dev/null and b/images/integrations/jira/jira-integration-two.png differ diff --git a/images/integrations/jira/jira-marketplace-auth.png b/images/integrations/jira/jira-marketplace-auth.png new file mode 100644 index 000000000..1f69b1d34 Binary files /dev/null and b/images/integrations/jira/jira-marketplace-auth.png differ diff --git a/images/integrations/jira/manage-apps.png b/images/integrations/jira/manage-apps.png new file mode 100644 index 000000000..588855644 Binary files /dev/null and b/images/integrations/jira/manage-apps.png differ diff --git a/images/integrations/kubernetes/add-cluster/add-cluster-button.png b/images/integrations/kubernetes/add-cluster/add-cluster-button.png new file mode 100644 index 000000000..4f618fb1b Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/add-cluster-button.png differ diff --git a/images/integrations/kubernetes/add-cluster/add-cluster-fields.png b/images/integrations/kubernetes/add-cluster/add-cluster-fields.png new file mode 100644 index 000000000..a245290c7 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/add-cluster-fields.png differ diff --git a/images/integrations/kubernetes/add-cluster/add-do-cluster.png b/images/integrations/kubernetes/add-cluster/add-do-cluster.png new file mode 100644 index 000000000..86a0d3511 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/add-do-cluster.png differ diff 
--git a/images/integrations/kubernetes/add-cluster/authorize-do.png b/images/integrations/kubernetes/add-cluster/authorize-do.png new file mode 100644 index 000000000..fa7f86c70 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/authorize-do.png differ diff --git a/images/integrations/kubernetes/add-cluster/cert-hierarchy.png b/images/integrations/kubernetes/add-cluster/cert-hierarchy.png new file mode 100644 index 000000000..f850381c2 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/cert-hierarchy.png differ diff --git a/images/integrations/kubernetes/add-cluster/connect-azure-spn.png b/images/integrations/kubernetes/add-cluster/connect-azure-spn.png new file mode 100644 index 000000000..39da41ef5 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/connect-azure-spn.png differ diff --git a/images/integrations/kubernetes/add-cluster/do-authorized.png b/images/integrations/kubernetes/add-cluster/do-authorized.png new file mode 100644 index 000000000..ad9f02615 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/do-authorized.png differ diff --git a/images/integrations/kubernetes/add-cluster/gke-basic-auth.png b/images/integrations/kubernetes/add-cluster/gke-basic-auth.png new file mode 100644 index 000000000..c5225fa8b Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/gke-basic-auth.png differ diff --git a/images/integrations/kubernetes/add-cluster/rancher-2.png b/images/integrations/kubernetes/add-cluster/rancher-2.png new file mode 100644 index 000000000..f1d17a3c6 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/rancher-2.png differ diff --git a/images/integrations/kubernetes/add-cluster/rancher-token.png b/images/integrations/kubernetes/add-cluster/rancher-token.png new file mode 100644 index 000000000..89efeb779 Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/rancher-token.png differ diff --git a/images/integrations/kubernetes/add-cluster/restrict-namespace.png b/images/integrations/kubernetes/add-cluster/restrict-namespace.png new file mode 100644 index 000000000..9999b36dd Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/restrict-namespace.png differ diff --git a/images/integrations/kubernetes/add-cluster/select-aks-cluster.png b/images/integrations/kubernetes/add-cluster/select-aks-cluster.png new file mode 100644 index 000000000..c2deb87df Binary files /dev/null and b/images/integrations/kubernetes/add-cluster/select-aks-cluster.png differ diff --git a/images/invite-users.png b/images/invite-users.png new file mode 100644 index 000000000..d29a06a4f Binary files /dev/null and b/images/invite-users.png differ diff --git a/images/pipeline/badges/view-public-logs.png b/images/pipeline/badges/view-public-logs.png index 395c40467..5589bd623 100644 Binary files a/images/pipeline/badges/view-public-logs.png and b/images/pipeline/badges/view-public-logs.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png b/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png new file mode 100644 index 000000000..90a0263c6 Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png b/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png new file mode 100644 index 000000000..6ec70a77c Binary files /dev/null and 
b/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png b/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png new file mode 100644 index 000000000..f821f1edf Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png b/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png new file mode 100644 index 000000000..01ad84041 Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/approval/approval-rule.png b/images/pipeline/codefresh-yaml/approval/approval-rule.png new file mode 100644 index 000000000..b77e4bd2b Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/approval-rule.png differ diff --git a/images/pipeline/codefresh-yaml/approval/approval-waiting.png b/images/pipeline/codefresh-yaml/approval/approval-waiting.png new file mode 100644 index 000000000..88dbd12ab Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/approval-waiting.png differ diff --git a/images/pipeline/codefresh-yaml/approval/build-waiting.png b/images/pipeline/codefresh-yaml/approval/build-waiting.png new file mode 100644 index 000000000..e594f9b83 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/build-waiting.png differ diff --git a/images/pipeline/codefresh-yaml/approval/keep-volume.png b/images/pipeline/codefresh-yaml/approval/keep-volume.png new file mode 100644 index 000000000..d73b466a8 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/keep-volume.png differ diff --git a/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png b/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png new file mode 100644 index 000000000..44acd7616 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png differ diff --git a/images/pipeline/codefresh-yaml/approval/pipeline-tag.png b/images/pipeline/codefresh-yaml/approval/pipeline-tag.png new file mode 100644 index 000000000..6c5b7b7d8 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/pipeline-tag.png differ diff --git a/images/pipeline/codefresh-yaml/approval/slack-approval.png b/images/pipeline/codefresh-yaml/approval/slack-approval.png new file mode 100644 index 000000000..a5c3c9a4b Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/slack-approval.png differ diff --git a/images/pipeline/codefresh-yaml/approval/slack-settings.png b/images/pipeline/codefresh-yaml/approval/slack-settings.png new file mode 100644 index 000000000..0d88cd277 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/slack-settings.png differ diff --git a/images/pipeline/codefresh-yaml/docker-image-metadata/annotations-image-table.png b/images/pipeline/codefresh-yaml/docker-image-metadata/annotations-image-table.png new file mode 100644 index 000000000..954f75b17 Binary files /dev/null and b/images/pipeline/codefresh-yaml/docker-image-metadata/annotations-image-table.png differ diff --git a/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png b/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png new file mode 100644 index 000000000..7fe8d4db5 Binary files /dev/null and b/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png 
differ diff --git a/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png b/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png new file mode 100644 index 000000000..f6d29822c Binary files /dev/null and b/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png differ diff --git a/images/pipeline/codefresh-yaml/environments/environments.png b/images/pipeline/codefresh-yaml/environments/environments.png new file mode 100644 index 000000000..2ad21c0dd Binary files /dev/null and b/images/pipeline/codefresh-yaml/environments/environments.png differ diff --git a/images/pipeline/codefresh-yaml/environments/helm-environment.png b/images/pipeline/codefresh-yaml/environments/helm-environment.png new file mode 100644 index 000000000..0c3140343 Binary files /dev/null and b/images/pipeline/codefresh-yaml/environments/helm-environment.png differ diff --git a/images/pipeline/codefresh-yaml/environments/k8s-environment.png b/images/pipeline/codefresh-yaml/environments/k8s-environment.png new file mode 100644 index 000000000..a556f9d2c Binary files /dev/null and b/images/pipeline/codefresh-yaml/environments/k8s-environment.png differ diff --git a/images/pipeline/codefresh-yaml/existing-composition.png b/images/pipeline/codefresh-yaml/existing-composition.png new file mode 100644 index 000000000..93b91ef64 Binary files /dev/null and b/images/pipeline/codefresh-yaml/existing-composition.png differ diff --git a/images/pipeline/codefresh-yaml/hooks/before-pipeline.png b/images/pipeline/codefresh-yaml/hooks/before-pipeline.png new file mode 100644 index 000000000..f259f530d Binary files /dev/null and b/images/pipeline/codefresh-yaml/hooks/before-pipeline.png differ diff --git a/images/pipeline/codefresh-yaml/hooks/cleanup-step.png b/images/pipeline/codefresh-yaml/hooks/cleanup-step.png new file mode 100644 index 000000000..13e9a18de Binary files /dev/null and b/images/pipeline/codefresh-yaml/hooks/cleanup-step.png differ diff --git a/images/pipeline/codefresh-yaml/hooks/step-after.png b/images/pipeline/codefresh-yaml/hooks/step-after.png new file mode 100644 index 000000000..e7b482972 Binary files /dev/null and b/images/pipeline/codefresh-yaml/hooks/step-after.png differ diff --git a/images/pipeline/codefresh-yaml/inline-editor.png b/images/pipeline/codefresh-yaml/inline-editor.png new file mode 100644 index 000000000..f7ae6c6f5 Binary files /dev/null and b/images/pipeline/codefresh-yaml/inline-editor.png differ diff --git a/images/pipeline/codefresh-yaml/parallel-push.png b/images/pipeline/codefresh-yaml/parallel-push.png new file mode 100644 index 000000000..2afa2d21e Binary files /dev/null and b/images/pipeline/codefresh-yaml/parallel-push.png differ diff --git a/images/pipeline/codefresh-yaml/redis-example.png b/images/pipeline/codefresh-yaml/redis-example.png new file mode 100644 index 000000000..ec6219227 Binary files /dev/null and b/images/pipeline/codefresh-yaml/redis-example.png differ diff --git a/images/pipeline/codefresh-yaml/services/services-tab.png b/images/pipeline/codefresh-yaml/services/services-tab.png new file mode 100644 index 000000000..4e8aa081b Binary files /dev/null and b/images/pipeline/codefresh-yaml/services/services-tab.png differ diff --git a/images/pipeline/codefresh-yaml/stages/complex-pipeline.png b/images/pipeline/codefresh-yaml/stages/complex-pipeline.png new file mode 100644 index 000000000..862856df1 Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/complex-pipeline.png differ diff --git 
a/images/pipeline/codefresh-yaml/stages/complex.png b/images/pipeline/codefresh-yaml/stages/complex.png new file mode 100644 index 000000000..02ee476b1 Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/complex.png differ diff --git a/images/pipeline/codefresh-yaml/stages/example.png b/images/pipeline/codefresh-yaml/stages/example.png new file mode 100644 index 000000000..f08a9f744 Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/example.png differ diff --git a/images/pipeline/codefresh-yaml/stages/linear-view.png b/images/pipeline/codefresh-yaml/stages/linear-view.png new file mode 100644 index 000000000..829121af2 Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/linear-view.png differ diff --git a/images/pipeline/codefresh-yaml/steps/choose-step.png b/images/pipeline/codefresh-yaml/steps/choose-step.png new file mode 100644 index 000000000..f8a832afc Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/choose-step.png differ diff --git a/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png b/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png new file mode 100644 index 000000000..fb3237f7c Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png differ diff --git a/images/pipeline/codefresh-yaml/steps/create-custom-step.png b/images/pipeline/codefresh-yaml/steps/create-custom-step.png new file mode 100644 index 000000000..884da4efb Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/create-custom-step.png differ diff --git a/images/pipeline/codefresh-yaml/steps/create-plugin-image.png b/images/pipeline/codefresh-yaml/steps/create-plugin-image.png new file mode 100644 index 000000000..49d3741a2 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/create-plugin-image.png differ diff --git a/images/pipeline/codefresh-yaml/steps/example-git-providers.png b/images/pipeline/codefresh-yaml/steps/example-git-providers.png new file mode 100644 index 000000000..2362a2179 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/example-git-providers.png differ diff --git a/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png b/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png new file mode 100644 index 000000000..c7389ab98 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png differ diff --git a/images/pipeline/codefresh-yaml/steps/input-parameters.png b/images/pipeline/codefresh-yaml/steps/input-parameters.png new file mode 100644 index 000000000..63c11583d Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/input-parameters.png differ diff --git a/images/pipeline/codefresh-yaml/steps/multi-checkout.png b/images/pipeline/codefresh-yaml/steps/multi-checkout.png new file mode 100644 index 000000000..919f3f1aa Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/multi-checkout.png differ diff --git a/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png b/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png new file mode 100644 index 000000000..6bc7e4722 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png differ diff --git a/images/pipeline/codefresh-yaml/steps/plugin-parameters.png b/images/pipeline/codefresh-yaml/steps/plugin-parameters.png new file mode 100644 index 000000000..b273bfb8e Binary files /dev/null and 
b/images/pipeline/codefresh-yaml/steps/plugin-parameters.png differ diff --git a/images/pipeline/codefresh-yaml/steps/plugin-usage.png b/images/pipeline/codefresh-yaml/steps/plugin-usage.png new file mode 100644 index 000000000..3f0a53e41 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/plugin-usage.png differ diff --git a/images/pipeline/codefresh-yaml/steps/proxy-variables.png b/images/pipeline/codefresh-yaml/steps/proxy-variables.png new file mode 100644 index 000000000..26725f548 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/proxy-variables.png differ diff --git a/images/pipeline/codefresh-yaml/steps/step-versions.png b/images/pipeline/codefresh-yaml/steps/step-versions.png new file mode 100644 index 000000000..1976dc0c4 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/step-versions.png differ diff --git a/images/pipeline/codefresh-yaml/variables/encrypted-variables.png b/images/pipeline/codefresh-yaml/variables/encrypted-variables.png new file mode 100644 index 000000000..68f82a42b Binary files /dev/null and b/images/pipeline/codefresh-yaml/variables/encrypted-variables.png differ diff --git a/images/pipeline/codefresh-yaml/variables/masked-variables.png b/images/pipeline/codefresh-yaml/variables/masked-variables.png new file mode 100644 index 000000000..d097f4f22 Binary files /dev/null and b/images/pipeline/codefresh-yaml/variables/masked-variables.png differ diff --git a/images/pipeline/create/add-pipeline-to-project.png b/images/pipeline/create/add-pipeline-to-project.png new file mode 100644 index 000000000..41fd8dd8f Binary files /dev/null and b/images/pipeline/create/add-pipeline-to-project.png differ diff --git a/images/pipeline/create/create-template-menu.png b/images/pipeline/create/create-template-menu.png new file mode 100644 index 000000000..1259fa3ed Binary files /dev/null and b/images/pipeline/create/create-template-menu.png differ diff --git a/images/pipeline/create/custom-yml.png b/images/pipeline/create/custom-yml.png new file mode 100644 index 000000000..925d9ad8a Binary files /dev/null and b/images/pipeline/create/custom-yml.png differ diff --git a/images/pipeline/create/editor.png b/images/pipeline/create/editor.png new file mode 100644 index 000000000..cea04bef5 Binary files /dev/null and b/images/pipeline/create/editor.png differ diff --git a/images/pipeline/create/external-resources.png b/images/pipeline/create/external-resources.png new file mode 100644 index 000000000..559439dc4 Binary files /dev/null and b/images/pipeline/create/external-resources.png differ diff --git a/images/pipeline/create/inline-editor.png b/images/pipeline/create/inline-editor.png new file mode 100644 index 000000000..94c821666 Binary files /dev/null and b/images/pipeline/create/inline-editor.png differ diff --git a/images/pipeline/create/pipeline-from-internal-repo.png b/images/pipeline/create/pipeline-from-internal-repo.png new file mode 100644 index 000000000..29d47914f Binary files /dev/null and b/images/pipeline/create/pipeline-from-internal-repo.png differ diff --git a/images/pipeline/create/pipelines-from-repository.png b/images/pipeline/create/pipelines-from-repository.png new file mode 100644 index 000000000..b1205e539 Binary files /dev/null and b/images/pipeline/create/pipelines-from-repository.png differ diff --git a/images/pipeline/create/pipelines-no-repository.png b/images/pipeline/create/pipelines-no-repository.png new file mode 100644 index 000000000..7b9c7e39c Binary files /dev/null and 
b/images/pipeline/create/pipelines-no-repository.png differ diff --git a/images/pipeline/create/predefined-steps.png b/images/pipeline/create/predefined-steps.png new file mode 100644 index 000000000..15a6939dd Binary files /dev/null and b/images/pipeline/create/predefined-steps.png differ diff --git a/images/pipeline/create/set-build-disk-space.png b/images/pipeline/create/set-build-disk-space.png new file mode 100644 index 000000000..374d2cd02 Binary files /dev/null and b/images/pipeline/create/set-build-disk-space.png differ diff --git a/images/pipeline/create/template-tag.png b/images/pipeline/create/template-tag.png index bd950367a..17313a44b 100644 Binary files a/images/pipeline/create/template-tag.png and b/images/pipeline/create/template-tag.png differ diff --git a/images/pipeline/docker-image/example2.png b/images/pipeline/docker-image/example2.png new file mode 100644 index 000000000..954f75b17 Binary files /dev/null and b/images/pipeline/docker-image/example2.png differ diff --git a/images/pipeline/docker-image/quality-image-annotation.png b/images/pipeline/docker-image/quality-image-annotation.png new file mode 100644 index 000000000..395b83356 Binary files /dev/null and b/images/pipeline/docker-image/quality-image-annotation.png differ diff --git a/images/pipeline/monitoring/build-variables-list.png b/images/pipeline/monitoring/build-variables-list.png new file mode 100644 index 000000000..2b8d45fa6 Binary files /dev/null and b/images/pipeline/monitoring/build-variables-list.png differ diff --git a/images/pipeline/monitoring/build-variables-view-option.png b/images/pipeline/monitoring/build-variables-view-option.png new file mode 100644 index 000000000..41398f6f1 Binary files /dev/null and b/images/pipeline/monitoring/build-variables-view-option.png differ diff --git a/images/pipeline/monitoring/child-parent-build-info.png b/images/pipeline/monitoring/child-parent-build-info.png new file mode 100644 index 000000000..421318b07 Binary files /dev/null and b/images/pipeline/monitoring/child-parent-build-info.png differ diff --git a/images/pipeline/monitoring/icon-child-build.png b/images/pipeline/monitoring/icon-child-build.png new file mode 100644 index 000000000..6973f9af4 Binary files /dev/null and b/images/pipeline/monitoring/icon-child-build.png differ diff --git a/images/pipeline/monitoring/pipeline-view.png b/images/pipeline/monitoring/pipeline-view.png index ae7acacb4..90422f9dd 100644 Binary files a/images/pipeline/monitoring/pipeline-view.png and b/images/pipeline/monitoring/pipeline-view.png differ diff --git a/images/pipeline/monitoring/step-status-approved.png b/images/pipeline/monitoring/step-status-approved.png new file mode 100644 index 000000000..a6b1048de Binary files /dev/null and b/images/pipeline/monitoring/step-status-approved.png differ diff --git a/images/pipeline/monitoring/step-status-denied.png b/images/pipeline/monitoring/step-status-denied.png new file mode 100644 index 000000000..56b962ce5 Binary files /dev/null and b/images/pipeline/monitoring/step-status-denied.png differ diff --git a/images/pipeline/monitoring/step-status-error.png b/images/pipeline/monitoring/step-status-error.png new file mode 100644 index 000000000..fe591e40c Binary files /dev/null and b/images/pipeline/monitoring/step-status-error.png differ diff --git a/images/pipeline/monitoring/step-status-running-debug.png b/images/pipeline/monitoring/step-status-running-debug.png new file mode 100644 index 000000000..8175de430 Binary files /dev/null and 
b/images/pipeline/monitoring/step-status-running-debug.png differ diff --git a/images/pipeline/monitoring/step-status-running.png b/images/pipeline/monitoring/step-status-running.png new file mode 100644 index 000000000..0761f1211 Binary files /dev/null and b/images/pipeline/monitoring/step-status-running.png differ diff --git a/images/pipeline/monitoring/step-status-success.gif b/images/pipeline/monitoring/step-status-success.gif new file mode 100644 index 000000000..53cb7ace6 Binary files /dev/null and b/images/pipeline/monitoring/step-status-success.gif differ diff --git a/images/pipeline/monitoring/step-status-success.png b/images/pipeline/monitoring/step-status-success.png new file mode 100644 index 000000000..6303d1b1c Binary files /dev/null and b/images/pipeline/monitoring/step-status-success.png differ diff --git a/images/pipeline/monitoring/step-status-terminated.png b/images/pipeline/monitoring/step-status-terminated.png new file mode 100644 index 000000000..c30bc4493 Binary files /dev/null and b/images/pipeline/monitoring/step-status-terminated.png differ diff --git a/images/pipeline/monitoring/step-status-terminating.png b/images/pipeline/monitoring/step-status-terminating.png new file mode 100644 index 000000000..f38ebcdf7 Binary files /dev/null and b/images/pipeline/monitoring/step-status-terminating.png differ diff --git a/images/pipeline/pipeline-settings/pause-pipeline-enabled.png b/images/pipeline/pipeline-settings/pause-pipeline-enabled.png new file mode 100644 index 000000000..8c7c4305a Binary files /dev/null and b/images/pipeline/pipeline-settings/pause-pipeline-enabled.png differ diff --git a/images/pipeline/pipeline-settings/pipeline-settings-ui.png b/images/pipeline/pipeline-settings/pipeline-settings-ui.png new file mode 100644 index 000000000..105544c92 Binary files /dev/null and b/images/pipeline/pipeline-settings/pipeline-settings-ui.png differ diff --git a/images/pipeline/triggers/pr-comment-trigger-options.png b/images/pipeline/triggers/pr-comment-trigger-options.png new file mode 100644 index 000000000..854666ed9 Binary files /dev/null and b/images/pipeline/triggers/pr-comment-trigger-options.png differ diff --git a/images/quick-start/cdops-add-app-commit.png b/images/quick-start/cdops-add-app-commit.png new file mode 100644 index 000000000..2ddbeea4d Binary files /dev/null and b/images/quick-start/cdops-add-app-commit.png differ diff --git a/images/quick-start/cdops-add-app-configuration.png b/images/quick-start/cdops-add-app-configuration.png new file mode 100644 index 000000000..9c61f2645 Binary files /dev/null and b/images/quick-start/cdops-add-app-configuration.png differ diff --git a/images/quick-start/cdops-add-app-dashboard.png b/images/quick-start/cdops-add-app-dashboard.png new file mode 100644 index 000000000..c98d20a1c Binary files /dev/null and b/images/quick-start/cdops-add-app-dashboard.png differ diff --git a/images/quick-start/cdops-add-app-settings.png b/images/quick-start/cdops-add-app-settings.png new file mode 100644 index 000000000..b8a978f5d Binary files /dev/null and b/images/quick-start/cdops-add-app-settings.png differ diff --git a/images/quick-start/cdops-app-current-state.png b/images/quick-start/cdops-app-current-state.png new file mode 100644 index 000000000..d718a8f7d Binary files /dev/null and b/images/quick-start/cdops-app-current-state.png differ diff --git a/images/quick-start/cdops-app-empty-current-state.png b/images/quick-start/cdops-app-empty-current-state.png new file mode 100644 index 000000000..6d9181d49 Binary files 
/dev/null and b/images/quick-start/cdops-app-empty-current-state.png differ diff --git a/images/quick-start/cdops-app-install-rollout.png b/images/quick-start/cdops-app-install-rollout.png new file mode 100644 index 000000000..66b594f8a Binary files /dev/null and b/images/quick-start/cdops-app-install-rollout.png differ diff --git a/images/quick-start/cdops-app-rollout-in-dashboard.png b/images/quick-start/cdops-app-rollout-in-dashboard.png new file mode 100644 index 000000000..e97459b4c Binary files /dev/null and b/images/quick-start/cdops-app-rollout-in-dashboard.png differ diff --git a/images/quick-start/cdops-app-rollout-panel.png b/images/quick-start/cdops-app-rollout-panel.png new file mode 100644 index 000000000..8b6129dde Binary files /dev/null and b/images/quick-start/cdops-app-rollout-panel.png differ diff --git a/images/quick-start/cdops-ci-docker-hub.png b/images/quick-start/cdops-ci-docker-hub.png new file mode 100644 index 000000000..2c50dd0ac Binary files /dev/null and b/images/quick-start/cdops-ci-docker-hub.png differ diff --git a/images/quick-start/create-account/bitbucket-authorize.png b/images/quick-start/create-account/bitbucket-authorize.png new file mode 100644 index 000000000..987adbff3 Binary files /dev/null and b/images/quick-start/create-account/bitbucket-authorize.png differ diff --git a/images/quick-start/create-account/codefresh-accountname.png b/images/quick-start/create-account/codefresh-accountname.png new file mode 100644 index 000000000..44cfd1036 Binary files /dev/null and b/images/quick-start/create-account/codefresh-accountname.png differ diff --git a/images/quick-start/create-account/codefresh-dashboard.png b/images/quick-start/create-account/codefresh-dashboard.png new file mode 100644 index 000000000..86c077b08 Binary files /dev/null and b/images/quick-start/create-account/codefresh-dashboard.png differ diff --git a/images/quick-start/create-account/codefresh-personalize.png b/images/quick-start/create-account/codefresh-personalize.png new file mode 100644 index 000000000..205fd55b3 Binary files /dev/null and b/images/quick-start/create-account/codefresh-personalize.png differ diff --git a/images/quick-start/create-account/codefresh-signup.png b/images/quick-start/create-account/codefresh-signup.png new file mode 100644 index 000000000..ab7a35661 Binary files /dev/null and b/images/quick-start/create-account/codefresh-signup.png differ diff --git a/images/quick-start/create-account/create-account-steps.png b/images/quick-start/create-account/create-account-steps.png new file mode 100644 index 000000000..9769257b5 Binary files /dev/null and b/images/quick-start/create-account/create-account-steps.png differ diff --git a/images/quick-start/create-account/git-firewall.png b/images/quick-start/create-account/git-firewall.png new file mode 100644 index 000000000..bf4842c4f Binary files /dev/null and b/images/quick-start/create-account/git-firewall.png differ diff --git a/images/quick-start/create-account/github-authorize.png b/images/quick-start/create-account/github-authorize.png new file mode 100644 index 000000000..49aa8809b Binary files /dev/null and b/images/quick-start/create-account/github-authorize.png differ diff --git a/images/quick-start/create-account/gitlab-authorize.png b/images/quick-start/create-account/gitlab-authorize.png new file mode 100644 index 000000000..d666b39f6 Binary files /dev/null and b/images/quick-start/create-account/gitlab-authorize.png differ diff --git a/images/quick-start/create-account/select-identity-provider.png 
b/images/quick-start/create-account/select-identity-provider.png new file mode 100644 index 000000000..d5321606e Binary files /dev/null and b/images/quick-start/create-account/select-identity-provider.png differ diff --git a/images/quick-start/create-account/stash.png b/images/quick-start/create-account/stash.png new file mode 100644 index 000000000..1415da356 Binary files /dev/null and b/images/quick-start/create-account/stash.png differ diff --git a/images/quick-start/quick-start-ci-pipeline-arguments.png b/images/quick-start/quick-start-ci-pipeline-arguments.png new file mode 100644 index 000000000..1d7ea0234 Binary files /dev/null and b/images/quick-start/quick-start-ci-pipeline-arguments.png differ diff --git a/images/quick-start/quick-start-ci/add-docker-hub.png b/images/quick-start/quick-start-ci/add-docker-hub.png new file mode 100644 index 000000000..fcac5f314 Binary files /dev/null and b/images/quick-start/quick-start-ci/add-docker-hub.png differ diff --git a/images/quick-start/quick-start-ci/add-new.png b/images/quick-start/quick-start-ci/add-new.png new file mode 100644 index 000000000..92780006e Binary files /dev/null and b/images/quick-start/quick-start-ci/add-new.png differ diff --git a/images/quick-start/quick-start-ci/build-button.png b/images/quick-start/quick-start-ci/build-button.png new file mode 100644 index 000000000..6da6cdc1c Binary files /dev/null and b/images/quick-start/quick-start-ci/build-button.png differ diff --git a/images/quick-start/quick-start-ci/build-method.png b/images/quick-start/quick-start-ci/build-method.png new file mode 100644 index 000000000..1be012842 Binary files /dev/null and b/images/quick-start/quick-start-ci/build-method.png differ diff --git a/images/quick-start/quick-start-ci/building.png b/images/quick-start/quick-start-ci/building.png new file mode 100644 index 000000000..07d491905 Binary files /dev/null and b/images/quick-start/quick-start-ci/building.png differ diff --git a/images/quick-start/quick-start-ci/ci-with-merges.svg b/images/quick-start/quick-start-ci/ci-with-merges.svg new file mode 100644 index 000000000..2da15cc57 --- /dev/null +++ b/images/quick-start/quick-start-ci/ci-with-merges.svg @@ -0,0 +1,974 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image, Store Docker Image, Push Docker Image, Test, Pull request, Feature 2, Feature 1, Master] diff --git a/images/quick-start/quick-start-ci/create-ci-project.png b/images/quick-start/quick-start-ci/create-ci-project.png new file mode 100644 index 000000000..b794862c0 Binary files /dev/null and b/images/quick-start/quick-start-ci/create-ci-project.png differ diff --git a/images/quick-start/quick-start-ci/create-pipeline.png b/images/quick-start/quick-start-ci/create-pipeline.png new file mode 100644 index 000000000..332301eb6 Binary files /dev/null and b/images/quick-start/quick-start-ci/create-pipeline.png differ diff --git a/images/quick-start/quick-start-ci/dashboard.png b/images/quick-start/quick-start-ci/dashboard.png new file mode 100644 index 000000000..34a927ba3 Binary files /dev/null and b/images/quick-start/quick-start-ci/dashboard.png differ diff --git
a/images/quick-start/quick-start-ci/docker-build-steps.jpg b/images/quick-start/quick-start-ci/docker-build-steps.jpg new file mode 100644 index 000000000..69b2f4165 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-build-steps.jpg differ diff --git a/images/quick-start/quick-start-ci/docker-build-steps.png b/images/quick-start/quick-start-ci/docker-build-steps.png new file mode 100644 index 000000000..178b822eb Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-build-steps.png differ diff --git a/images/quick-start/quick-start-ci/docker-build-steps.svg b/images/quick-start/quick-start-ci/docker-build-steps.svg new file mode 100644 index 000000000..2b0021a39 --- /dev/null +++ b/images/quick-start/quick-start-ci/docker-build-steps.svg @@ -0,0 +1,270 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image] diff --git a/images/quick-start/quick-start-ci/docker-file-locate.png b/images/quick-start/quick-start-ci/docker-file-locate.png new file mode 100644 index 000000000..81d73d6d5 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-file-locate.png differ diff --git a/images/quick-start/quick-start-ci/docker-hub.png b/images/quick-start/quick-start-ci/docker-hub.png new file mode 100644 index 000000000..ff945379c Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-hub.png differ diff --git a/images/quick-start/quick-start-ci/docker-images.png b/images/quick-start/quick-start-ci/docker-images.png new file mode 100644 index 000000000..986ba244e Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-images.png differ diff --git a/images/quick-start/quick-start-ci/docker-layers.png b/images/quick-start/quick-start-ci/docker-layers.png new file mode 100644 index 000000000..090e94c0d Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-layers.png differ diff --git a/images/quick-start/quick-start-ci/docker-push-stage.jpg b/images/quick-start/quick-start-ci/docker-push-stage.jpg new file mode 100644 index 000000000..47575b018 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-push-stage.jpg differ diff --git a/images/quick-start/quick-start-ci/docker-push-stage.png b/images/quick-start/quick-start-ci/docker-push-stage.png new file mode 100644 index 000000000..101dec208 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-push-stage.png differ diff --git a/images/quick-start/quick-start-ci/docker-push-stage.svg b/images/quick-start/quick-start-ci/docker-push-stage.svg new file mode 100644 index 000000000..597cd444d --- /dev/null +++ b/images/quick-start/quick-start-ci/docker-push-stage.svg @@ -0,0 +1,356 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image, Store Docker Image, Push Docker Image, Test] diff --git a/images/quick-start/quick-start-ci/docker-pushing.png b/images/quick-start/quick-start-ci/docker-pushing.png new file mode 100644 index 000000000..68243eaf0 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-pushing.png differ diff --git a/images/quick-start/quick-start-ci/docker-store-stage.jpg b/images/quick-start/quick-start-ci/docker-store-stage.jpg new file mode 100644 index 000000000..df71408b8 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-store-stage.jpg differ diff --git
a/images/quick-start/quick-start-ci/docker-store-stage.png b/images/quick-start/quick-start-ci/docker-store-stage.png new file mode 100644 index 000000000..99ddf870b Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-store-stage.png differ diff --git a/images/quick-start/quick-start-ci/docker-store-stage.svg b/images/quick-start/quick-start-ci/docker-store-stage.svg new file mode 100644 index 000000000..dcc35e69f --- /dev/null +++ b/images/quick-start/quick-start-ci/docker-store-stage.svg @@ -0,0 +1,322 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image, Store Docker Image, Test] diff --git a/images/quick-start/quick-start-ci/docker-timeline.png b/images/quick-start/quick-start-ci/docker-timeline.png new file mode 100644 index 000000000..ea7c844d9 Binary files /dev/null and b/images/quick-start/quick-start-ci/docker-timeline.png differ diff --git a/images/quick-start/quick-start-ci/external-registries.png b/images/quick-start/quick-start-ci/external-registries.png new file mode 100644 index 000000000..572b45d1b Binary files /dev/null and b/images/quick-start/quick-start-ci/external-registries.png differ diff --git a/images/quick-start/quick-start-ci/finished-build.png b/images/quick-start/quick-start-ci/finished-build.png new file mode 100644 index 000000000..aa474d944 Binary files /dev/null and b/images/quick-start/quick-start-ci/finished-build.png differ diff --git a/images/quick-start/quick-start-ci/fork-example-project.png b/images/quick-start/quick-start-ci/fork-example-project.png new file mode 100644 index 000000000..22dd534b6 Binary files /dev/null and b/images/quick-start/quick-start-ci/fork-example-project.png differ diff --git a/images/quick-start/quick-start-ci/inspect-docker-file.png b/images/quick-start/quick-start-ci/inspect-docker-file.png new file mode 100644 index 000000000..8652e71ec Binary files /dev/null and b/images/quick-start/quick-start-ci/inspect-docker-file.png differ diff --git a/images/quick-start/quick-start-ci/pipeline-overview.jpg b/images/quick-start/quick-start-ci/pipeline-overview.jpg new file mode 100644 index 000000000..6443154ef Binary files /dev/null and b/images/quick-start/quick-start-ci/pipeline-overview.jpg differ diff --git a/images/quick-start/quick-start-ci/pipeline-overview.png b/images/quick-start/quick-start-ci/pipeline-overview.png new file mode 100644 index 000000000..b36aa08d6 Binary files /dev/null and b/images/quick-start/quick-start-ci/pipeline-overview.png differ diff --git a/images/quick-start/quick-start-ci/pipeline-overview.svg b/images/quick-start/quick-start-ci/pipeline-overview.svg new file mode 100644 index 000000000..51fbcdadb --- /dev/null +++ b/images/quick-start/quick-start-ci/pipeline-overview.svg @@ -0,0 +1,401 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image, Store Docker Image, Push Docker Image, Test, Demo Environment] diff --git a/images/quick-start/quick-start-ci/push-to-registry.png b/images/quick-start/quick-start-ci/push-to-registry.png new file mode 100644 index 000000000..26214fb07 Binary files /dev/null and b/images/quick-start/quick-start-ci/push-to-registry.png differ diff --git a/images/quick-start/quick-start-ci/sample-application.png b/images/quick-start/quick-start-ci/sample-application.png new file mode 100644 index 000000000..0bbe73d5f
Binary files /dev/null and b/images/quick-start/quick-start-ci/sample-application.png differ diff --git a/images/quick-start/quick-start-ci/select-branch-to-build.png b/images/quick-start/quick-start-ci/select-branch-to-build.png new file mode 100644 index 000000000..9ecb3f18c Binary files /dev/null and b/images/quick-start/quick-start-ci/select-branch-to-build.png differ diff --git a/images/quick-start/quick-start-ci/select-git-repository.png b/images/quick-start/quick-start-ci/select-git-repository.png new file mode 100644 index 000000000..d42f3c64c Binary files /dev/null and b/images/quick-start/quick-start-ci/select-git-repository.png differ diff --git a/images/quick-start/quick-start-ci/unit-test-result.png b/images/quick-start/quick-start-ci/unit-test-result.png new file mode 100644 index 000000000..a7ad54d86 Binary files /dev/null and b/images/quick-start/quick-start-ci/unit-test-result.png differ diff --git a/images/quick-start/quick-start-ci/unit-test-stage.jpg b/images/quick-start/quick-start-ci/unit-test-stage.jpg new file mode 100644 index 000000000..9e0c9c06e Binary files /dev/null and b/images/quick-start/quick-start-ci/unit-test-stage.jpg differ diff --git a/images/quick-start/quick-start-ci/unit-test-stage.png b/images/quick-start/quick-start-ci/unit-test-stage.png new file mode 100644 index 000000000..e0e098ff2 Binary files /dev/null and b/images/quick-start/quick-start-ci/unit-test-stage.png differ diff --git a/images/quick-start/quick-start-ci/unit-test-stage.svg b/images/quick-start/quick-start-ci/unit-test-stage.svg new file mode 100644 index 000000000..29b91cffc --- /dev/null +++ b/images/quick-start/quick-start-ci/unit-test-stage.svg @@ -0,0 +1,287 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image, Test] diff --git a/images/quick-start/quick-start-ci/unit-tests.png b/images/quick-start/quick-start-ci/unit-tests.png new file mode 100644 index 000000000..e881c02a6 Binary files /dev/null and b/images/quick-start/quick-start-ci/unit-tests.png differ diff --git a/images/quick-start/quick-start-ci/webhook-github.png b/images/quick-start/quick-start-ci/webhook-github.png new file mode 100644 index 000000000..60021616b Binary files /dev/null and b/images/quick-start/quick-start-ci/webhook-github.png differ diff --git a/images/quick-start/quick-start-ci/yaml-toggle.png b/images/quick-start/quick-start-ci/yaml-toggle.png new file mode 100644 index 000000000..a3712d889 Binary files /dev/null and b/images/quick-start/quick-start-ci/yaml-toggle.png differ diff --git a/images/quick-start/quick-start-download-cli.png b/images/quick-start/quick-start-download-cli.png new file mode 100644 index 000000000..8ad04b95b Binary files /dev/null and b/images/quick-start/quick-start-download-cli.png differ diff --git a/images/quick-start/quick-start-edit-git-event-source.png b/images/quick-start/quick-start-edit-git-event-source.png new file mode 100644 index 000000000..ce046cc6c Binary files /dev/null and b/images/quick-start/quick-start-edit-git-event-source.png differ diff --git a/images/quick-start/quick-start-git-event-permissions.png b/images/quick-start/quick-start-git-event-permissions.png new file mode 100644 index 000000000..7e4403600 Binary files /dev/null and b/images/quick-start/quick-start-git-event-permissions.png differ diff --git a/images/quick-start/quick-start-git-source-edit.png b/images/quick-start/quick-start-git-source-edit.png new file mode 100644 index
000000000..27b546bac Binary files /dev/null and b/images/quick-start/quick-start-git-source-edit.png differ diff --git a/images/quick-start/quick-start-git-source-manifest-edit.png b/images/quick-start/quick-start-git-source-manifest-edit.png new file mode 100644 index 000000000..a10b1ad58 Binary files /dev/null and b/images/quick-start/quick-start-git-source-manifest-edit.png differ diff --git a/images/quick-start/quick-start-git-source-repo.png b/images/quick-start/quick-start-git-source-repo.png new file mode 100644 index 000000000..6a068b5bb Binary files /dev/null and b/images/quick-start/quick-start-git-source-repo.png differ diff --git a/images/quick-start/quick-start-git-source-tab.png b/images/quick-start/quick-start-git-source-tab.png new file mode 100644 index 000000000..f1daca218 Binary files /dev/null and b/images/quick-start/quick-start-git-source-tab.png differ diff --git a/images/quick-start/quick-start-git-source.png b/images/quick-start/quick-start-git-source.png new file mode 100644 index 000000000..011dd1c29 Binary files /dev/null and b/images/quick-start/quick-start-git-source.png differ diff --git a/images/quick-start/quick-start-gitsources.png b/images/quick-start/quick-start-gitsources.png new file mode 100644 index 000000000..963c7c81c Binary files /dev/null and b/images/quick-start/quick-start-gitsources.png differ diff --git a/images/quick-start/quick-start-helm/empty-helm-cluster.png b/images/quick-start/quick-start-helm/empty-helm-cluster.png new file mode 100644 index 000000000..3af8d098e Binary files /dev/null and b/images/quick-start/quick-start-helm/empty-helm-cluster.png differ diff --git a/images/quick-start/quick-start-helm/helm-logs.png b/images/quick-start/quick-start-helm/helm-logs.png new file mode 100644 index 000000000..fa5a57942 Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-logs.png differ diff --git a/images/quick-start/quick-start-helm/helm-only-store.png b/images/quick-start/quick-start-helm/helm-only-store.png new file mode 100644 index 000000000..0e7f4e2c6 Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-only-store.png differ diff --git a/images/quick-start/quick-start-helm/helm-release-details.png b/images/quick-start/quick-start-helm/helm-release-details.png new file mode 100644 index 000000000..fdd692b8e Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-release-details.png differ diff --git a/images/quick-start/quick-start-helm/helm-repo.png b/images/quick-start/quick-start-helm/helm-repo.png new file mode 100644 index 000000000..bab1ce09b Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-repo.png differ diff --git a/images/quick-start/quick-start-helm/helm-rollback.png b/images/quick-start/quick-start-helm/helm-rollback.png new file mode 100644 index 000000000..8e7afa0a2 Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-rollback.png differ diff --git a/images/quick-start/quick-start-helm/helm-select-shared-configuration.png b/images/quick-start/quick-start-helm/helm-select-shared-configuration.png new file mode 100644 index 000000000..4f1cb75c0 Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-select-shared-configuration.png differ diff --git a/images/quick-start/quick-start-helm/helm-values.png b/images/quick-start/quick-start-helm/helm-values.png new file mode 100644 index 000000000..f5eae9b13 Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-values.png differ diff --git 
a/images/quick-start/quick-start-helm/helm-version-selection.png b/images/quick-start/quick-start-helm/helm-version-selection.png new file mode 100644 index 000000000..b88412000 Binary files /dev/null and b/images/quick-start/quick-start-helm/helm-version-selection.png differ diff --git a/images/quick-start/quick-start-helm/import-helm-repo-conf.png b/images/quick-start/quick-start-helm/import-helm-repo-conf.png new file mode 100644 index 000000000..6b0b984fe Binary files /dev/null and b/images/quick-start/quick-start-helm/import-helm-repo-conf.png differ diff --git a/images/quick-start/quick-start-k8s/add-codefresh-registry.png b/images/quick-start/quick-start-k8s/add-codefresh-registry.png new file mode 100644 index 000000000..1f189587d Binary files /dev/null and b/images/quick-start/quick-start-k8s/add-codefresh-registry.png differ diff --git a/images/quick-start/quick-start-k8s/add-service-button.png b/images/quick-start/quick-start-k8s/add-service-button.png new file mode 100644 index 000000000..5201bb34a Binary files /dev/null and b/images/quick-start/quick-start-k8s/add-service-button.png differ diff --git a/images/quick-start/quick-start-k8s/add-service.png b/images/quick-start/quick-start-k8s/add-service.png new file mode 100644 index 000000000..9e591d297 Binary files /dev/null and b/images/quick-start/quick-start-k8s/add-service.png differ diff --git a/images/quick-start/quick-start-k8s/after-change.png b/images/quick-start/quick-start-k8s/after-change.png new file mode 100644 index 000000000..5251e7d9d Binary files /dev/null and b/images/quick-start/quick-start-k8s/after-change.png differ diff --git a/images/quick-start/quick-start-k8s/after-deployment.png b/images/quick-start/quick-start-k8s/after-deployment.png new file mode 100644 index 000000000..24785c27f Binary files /dev/null and b/images/quick-start/quick-start-k8s/after-deployment.png differ diff --git a/images/quick-start/quick-start-k8s/before-change.png b/images/quick-start/quick-start-k8s/before-change.png new file mode 100644 index 000000000..00277967c Binary files /dev/null and b/images/quick-start/quick-start-k8s/before-change.png differ diff --git a/images/quick-start/quick-start-k8s/deployment-build.png b/images/quick-start/quick-start-k8s/deployment-build.png new file mode 100644 index 000000000..d99bb08c9 Binary files /dev/null and b/images/quick-start/quick-start-k8s/deployment-build.png differ diff --git a/images/quick-start/quick-start-k8s/deployment-in-progress.png b/images/quick-start/quick-start-k8s/deployment-in-progress.png new file mode 100644 index 000000000..d4f7c703e Binary files /dev/null and b/images/quick-start/quick-start-k8s/deployment-in-progress.png differ diff --git a/images/quick-start/quick-start-k8s/deployment-step.png b/images/quick-start/quick-start-k8s/deployment-step.png new file mode 100644 index 000000000..d4f7c703e Binary files /dev/null and b/images/quick-start/quick-start-k8s/deployment-step.png differ diff --git a/images/quick-start/quick-start-k8s/docker-image-name.png b/images/quick-start/quick-start-k8s/docker-image-name.png new file mode 100644 index 000000000..162a46c8f Binary files /dev/null and b/images/quick-start/quick-start-k8s/docker-image-name.png differ diff --git a/images/quick-start/quick-start-k8s/generate-api-button.png b/images/quick-start/quick-start-k8s/generate-api-button.png new file mode 100644 index 000000000..ab9926668 Binary files /dev/null and b/images/quick-start/quick-start-k8s/generate-api-button.png differ diff --git 
a/images/quick-start/quick-start-k8s/generate-api-key.png b/images/quick-start/quick-start-k8s/generate-api-key.png new file mode 100644 index 000000000..e2f795dce Binary files /dev/null and b/images/quick-start/quick-start-k8s/generate-api-key.png differ diff --git a/images/quick-start/quick-start-k8s/git-change.png b/images/quick-start/quick-start-k8s/git-change.png new file mode 100644 index 000000000..7e4e60c57 Binary files /dev/null and b/images/quick-start/quick-start-k8s/git-change.png differ diff --git a/images/quick-start/quick-start-k8s/integrations.png b/images/quick-start/quick-start-k8s/integrations.png new file mode 100644 index 000000000..3f90509a1 Binary files /dev/null and b/images/quick-start/quick-start-k8s/integrations.png differ diff --git a/images/quick-start/quick-start-k8s/overview.png b/images/quick-start/quick-start-k8s/overview.png new file mode 100644 index 000000000..d04961b28 Binary files /dev/null and b/images/quick-start/quick-start-k8s/overview.png differ diff --git a/images/quick-start/quick-start-k8s/push-to-registry.png b/images/quick-start/quick-start-k8s/push-to-registry.png new file mode 100644 index 000000000..009f52eed Binary files /dev/null and b/images/quick-start/quick-start-k8s/push-to-registry.png differ diff --git a/images/quick-start/quick-start-manifest-expand.png b/images/quick-start/quick-start-manifest-expand.png new file mode 100644 index 000000000..5d622329d Binary files /dev/null and b/images/quick-start/quick-start-manifest-expand.png differ diff --git a/images/quick-start/quick-start-manifest-git-source.png b/images/quick-start/quick-start-manifest-git-source.png new file mode 100644 index 000000000..30f8e59b0 Binary files /dev/null and b/images/quick-start/quick-start-manifest-git-source.png differ diff --git a/images/quick-start/quick-start-new-pipeline-filled.png b/images/quick-start/quick-start-new-pipeline-filled.png new file mode 100644 index 000000000..a3e5754a6 Binary files /dev/null and b/images/quick-start/quick-start-new-pipeline-filled.png differ diff --git a/images/quick-start/quick-start-new-pipeline.png b/images/quick-start/quick-start-new-pipeline.png new file mode 100644 index 000000000..da50ef492 Binary files /dev/null and b/images/quick-start/quick-start-new-pipeline.png differ diff --git a/images/quick-start/quick-start-pipelines.png b/images/quick-start/quick-start-pipelines.png new file mode 100644 index 000000000..a0b38513f Binary files /dev/null and b/images/quick-start/quick-start-pipelines.png differ diff --git a/images/quick-start/quick-start-pipelines1.png b/images/quick-start/quick-start-pipelines1.png new file mode 100644 index 000000000..257944b04 Binary files /dev/null and b/images/quick-start/quick-start-pipelines1.png differ diff --git a/images/quick-start/quick-start-runtime-components.png b/images/quick-start/quick-start-runtime-components.png new file mode 100644 index 000000000..c43aa1f82 Binary files /dev/null and b/images/quick-start/quick-start-runtime-components.png differ diff --git a/images/quick-start/quick-start-runtime-dashboard.png b/images/quick-start/quick-start-runtime-dashboard.png new file mode 100644 index 000000000..cce742563 Binary files /dev/null and b/images/quick-start/quick-start-runtime-dashboard.png differ diff --git a/images/quick-start/quick-start-runtime-git-source-repo.png b/images/quick-start/quick-start-runtime-git-source-repo.png new file mode 100644 index 000000000..e8d3debe2 Binary files /dev/null and b/images/quick-start/quick-start-runtime-git-source-repo.png 
differ diff --git a/images/quick-start/quick-start-test-pr/auto-build-feature.png b/images/quick-start/quick-start-test-pr/auto-build-feature.png new file mode 100644 index 000000000..1189d9a71 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/auto-build-feature.png differ diff --git a/images/quick-start/quick-start-test-pr/auto-build-pr.png b/images/quick-start/quick-start-test-pr/auto-build-pr.png new file mode 100644 index 000000000..89669530d Binary files /dev/null and b/images/quick-start/quick-start-test-pr/auto-build-pr.png differ diff --git a/images/quick-start/quick-start-test-pr/auto-build-settings.png b/images/quick-start/quick-start-test-pr/auto-build-settings.png new file mode 100644 index 000000000..f74365f78 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/auto-build-settings.png differ diff --git a/images/quick-start/quick-start-test-pr/demo-environment.png b/images/quick-start/quick-start-test-pr/demo-environment.png new file mode 100644 index 000000000..48614f490 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/demo-environment.png differ diff --git a/images/quick-start/quick-start-test-pr/demo-stage.jpg b/images/quick-start/quick-start-test-pr/demo-stage.jpg new file mode 100644 index 000000000..db836686c Binary files /dev/null and b/images/quick-start/quick-start-test-pr/demo-stage.jpg differ diff --git a/images/quick-start/quick-start-test-pr/demo-stage.png b/images/quick-start/quick-start-test-pr/demo-stage.png new file mode 100644 index 000000000..aafe111be Binary files /dev/null and b/images/quick-start/quick-start-test-pr/demo-stage.png differ diff --git a/images/quick-start/quick-start-test-pr/demo-stage.svg b/images/quick-start/quick-start-test-pr/demo-stage.svg new file mode 100644 index 000000000..eab0ac324 --- /dev/null +++ b/images/quick-start/quick-start-test-pr/demo-stage.svg @@ -0,0 +1,347 @@ [SVG source; diagram labels: Git, Compile/Package, Build Docker Image, Store Docker Image, Test, Demo Environment] diff --git a/images/quick-start/quick-start-test-pr/env-details.png b/images/quick-start/quick-start-test-pr/env-details.png new file mode 100644 index 000000000..949a2c051 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/env-details.png differ diff --git a/images/quick-start/quick-start-test-pr/launch-dashboard.png b/images/quick-start/quick-start-test-pr/launch-dashboard.png new file mode 100644 index 000000000..05f317da9 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/launch-dashboard.png differ diff --git a/images/quick-start/quick-start-test-pr/launch-url.png b/images/quick-start/quick-start-test-pr/launch-url.png new file mode 100644 index 000000000..bacf0b7f9 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/launch-url.png differ diff --git a/images/quick-start/quick-start-test-pr/launch.png b/images/quick-start/quick-start-test-pr/launch.png new file mode 100644 index 000000000..ce0ae37cf Binary files /dev/null and b/images/quick-start/quick-start-test-pr/launch.png differ diff --git a/images/quick-start/quick-start-test-pr/multiple-pipelines.png b/images/quick-start/quick-start-test-pr/multiple-pipelines.png new file mode 100644 index 000000000..16a42677b Binary files /dev/null and b/images/quick-start/quick-start-test-pr/multiple-pipelines.png differ diff --git a/images/quick-start/quick-start-test-pr/port-setting.png
b/images/quick-start/quick-start-test-pr/port-setting.png new file mode 100644 index 000000000..a6f234a7c Binary files /dev/null and b/images/quick-start/quick-start-test-pr/port-setting.png differ diff --git a/images/quick-start/quick-start-test-pr/view-branches.png b/images/quick-start/quick-start-test-pr/view-branches.png new file mode 100644 index 000000000..df584d087 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/view-branches.png differ diff --git a/images/quick-start/quick-start-test-pr/view-prs.png b/images/quick-start/quick-start-test-pr/view-prs.png new file mode 100644 index 000000000..3706ca172 Binary files /dev/null and b/images/quick-start/quick-start-test-pr/view-prs.png differ diff --git a/images/sso/azure/1-azure-service.png b/images/sso/azure/1-azure-service.png deleted file mode 100644 index e24b0db13..000000000 Binary files a/images/sso/azure/1-azure-service.png and /dev/null differ diff --git a/images/sso/azure/10-Add-client-secret.png b/images/sso/azure/10-Add-client-secret.png deleted file mode 100644 index 9eb929441..000000000 Binary files a/images/sso/azure/10-Add-client-secret.png and /dev/null differ diff --git a/images/sso/azure/11-Set-reply-url.png b/images/sso/azure/11-Set-reply-url.png deleted file mode 100644 index 358648026..000000000 Binary files a/images/sso/azure/11-Set-reply-url.png and /dev/null differ diff --git a/images/sso/azure/12-set-reply-URL.png b/images/sso/azure/12-set-reply-URL.png deleted file mode 100644 index 22b82819e..000000000 Binary files a/images/sso/azure/12-set-reply-URL.png and /dev/null differ diff --git a/images/sso/azure/13-Enable-ID-Tokens.png b/images/sso/azure/13-Enable-ID-Tokens.png deleted file mode 100644 index d76d48d71..000000000 Binary files a/images/sso/azure/13-Enable-ID-Tokens.png and /dev/null differ diff --git a/images/sso/azure/2-app-registrations.png b/images/sso/azure/2-app-registrations.png deleted file mode 100644 index e9e793097..000000000 Binary files a/images/sso/azure/2-app-registrations.png and /dev/null differ diff --git a/images/sso/azure/3-register-an-app.png b/images/sso/azure/3-register-an-app.png deleted file mode 100644 index 3a3d4c15c..000000000 Binary files a/images/sso/azure/3-register-an-app.png and /dev/null differ diff --git a/images/sso/azure/4-created-app.png b/images/sso/azure/4-created-app.png deleted file mode 100644 index 3a1c8a969..000000000 Binary files a/images/sso/azure/4-created-app.png and /dev/null differ diff --git a/images/sso/azure/5-api-permissions.png b/images/sso/azure/5-api-permissions.png deleted file mode 100644 index b64467ad6..000000000 Binary files a/images/sso/azure/5-api-permissions.png and /dev/null differ diff --git a/images/sso/azure/6-request-api-permissions.png b/images/sso/azure/6-request-api-permissions.png deleted file mode 100644 index b344dd24b..000000000 Binary files a/images/sso/azure/6-request-api-permissions.png and /dev/null differ diff --git a/images/sso/azure/7-request-api-permissions.png b/images/sso/azure/7-request-api-permissions.png deleted file mode 100644 index 94d41031e..000000000 Binary files a/images/sso/azure/7-request-api-permissions.png and /dev/null differ diff --git a/images/sso/azure/8-Enabled-permissions.png b/images/sso/azure/8-Enabled-permissions.png deleted file mode 100644 index 212c0aca4..000000000 Binary files a/images/sso/azure/8-Enabled-permissions.png and /dev/null differ diff --git a/images/sso/azure/9-Create-secret-page.png b/images/sso/azure/9-Create-secret-page.png deleted file mode 100644 index 
040bcb4b3..000000000 Binary files a/images/sso/azure/9-Create-secret-page.png and /dev/null differ diff --git a/images/sso/azure/azure-properties-object-app-ids.png b/images/sso/azure/azure-properties-object-app-ids.png new file mode 100644 index 000000000..fc81be46b Binary files /dev/null and b/images/sso/azure/azure-properties-object-app-ids.png differ diff --git a/images/sso/azure/azure-step01.png b/images/sso/azure/azure-step01.png deleted file mode 100644 index c5b2169a8..000000000 Binary files a/images/sso/azure/azure-step01.png and /dev/null differ diff --git a/images/sso/azure/azure-step02.png b/images/sso/azure/azure-step02.png deleted file mode 100644 index d1a3375a7..000000000 Binary files a/images/sso/azure/azure-step02.png and /dev/null differ diff --git a/images/sso/azure/azure-step03.png b/images/sso/azure/azure-step03.png deleted file mode 100644 index ea1226182..000000000 Binary files a/images/sso/azure/azure-step03.png and /dev/null differ diff --git a/images/sso/azure/azure-step04.png b/images/sso/azure/azure-step04.png deleted file mode 100644 index ee1506cb8..000000000 Binary files a/images/sso/azure/azure-step04.png and /dev/null differ diff --git a/images/sso/azure/azure-step05.png b/images/sso/azure/azure-step05.png deleted file mode 100644 index 8968ec581..000000000 Binary files a/images/sso/azure/azure-step05.png and /dev/null differ diff --git a/images/sso/azure/azure-step06.png b/images/sso/azure/azure-step06.png deleted file mode 100644 index 9121527a1..000000000 Binary files a/images/sso/azure/azure-step06.png and /dev/null differ diff --git a/images/sso/azure/azure-step07.png b/images/sso/azure/azure-step07.png deleted file mode 100644 index efdd321f3..000000000 Binary files a/images/sso/azure/azure-step07.png and /dev/null differ diff --git a/images/sso/azure/azure-step08.png b/images/sso/azure/azure-step08.png deleted file mode 100644 index f0687e654..000000000 Binary files a/images/sso/azure/azure-step08.png and /dev/null differ diff --git a/images/sso/azure/azure-step09.png b/images/sso/azure/azure-step09.png deleted file mode 100644 index 4ebef7173..000000000 Binary files a/images/sso/azure/azure-step09.png and /dev/null differ diff --git a/images/sso/azure/azure-step10.png b/images/sso/azure/azure-step10.png deleted file mode 100644 index 64e474079..000000000 Binary files a/images/sso/azure/azure-step10.png and /dev/null differ diff --git a/images/sso/azure/azure-step5.png b/images/sso/azure/azure-step5.png deleted file mode 100644 index 050c60f0b..000000000 Binary files a/images/sso/azure/azure-step5.png and /dev/null differ diff --git a/images/sso/azure/client-secret-add-description.png b/images/sso/azure/client-secret-add-description.png new file mode 100644 index 000000000..c16e0ef9d Binary files /dev/null and b/images/sso/azure/client-secret-add-description.png differ diff --git a/images/sso/azure/client-secret-select-option.png b/images/sso/azure/client-secret-select-option.png new file mode 100644 index 000000000..e54b6dd46 Binary files /dev/null and b/images/sso/azure/client-secret-select-option.png differ diff --git a/images/sso/azure/config-app-permissions-added.png b/images/sso/azure/config-app-permissions-added.png new file mode 100644 index 000000000..044deb128 Binary files /dev/null and b/images/sso/azure/config-app-permissions-added.png differ diff --git a/images/sso/azure/config-app-permissions-grant-admin-consent.png b/images/sso/azure/config-app-permissions-grant-admin-consent.png new file mode 100644 index 000000000..bb2c7e956 Binary 
files /dev/null and b/images/sso/azure/config-app-permissions-grant-admin-consent.png differ diff --git a/images/sso/azure/config-app-permissions-microsoft-graph.png b/images/sso/azure/config-app-permissions-microsoft-graph.png new file mode 100644 index 000000000..15bd1821c Binary files /dev/null and b/images/sso/azure/config-app-permissions-microsoft-graph.png differ diff --git a/images/sso/azure/config-app-permissions-selected.png b/images/sso/azure/config-app-permissions-selected.png new file mode 100644 index 000000000..c020f24b1 Binary files /dev/null and b/images/sso/azure/config-app-permissions-selected.png differ diff --git a/images/sso/azure/redirect-rui-define-select-id-tokens.png b/images/sso/azure/redirect-rui-define-select-id-tokens.png new file mode 100644 index 000000000..0e8985d60 Binary files /dev/null and b/images/sso/azure/redirect-rui-define-select-id-tokens.png differ diff --git a/images/sso/azure/redirect-uri-web-configure.png b/images/sso/azure/redirect-uri-web-configure.png new file mode 100644 index 000000000..05f27e91c Binary files /dev/null and b/images/sso/azure/redirect-uri-web-configure.png differ diff --git a/images/sso/azure/register-app-name.png b/images/sso/azure/register-app-name.png new file mode 100644 index 000000000..aa4e542a6 Binary files /dev/null and b/images/sso/azure/register-app-name.png differ diff --git a/images/sso/azure/register-app-select-azure-ad.png b/images/sso/azure/register-app-select-azure-ad.png new file mode 100644 index 000000000..d2efcfb07 Binary files /dev/null and b/images/sso/azure/register-app-select-azure-ad.png differ diff --git a/images/sso/azure/sso-codefresh-generated-client-id.png b/images/sso/azure/sso-codefresh-generated-client-id.png new file mode 100644 index 000000000..86cb716ec Binary files /dev/null and b/images/sso/azure/sso-codefresh-generated-client-id.png differ diff --git a/images/sso/azure/sso-codefresh-settings.png b/images/sso/azure/sso-codefresh-settings.png new file mode 100644 index 000000000..eabe5f6dd Binary files /dev/null and b/images/sso/azure/sso-codefresh-settings.png differ diff --git a/images/testing/codacy/codacy-add-repo.png b/images/testing/codacy/codacy-add-repo.png new file mode 100644 index 000000000..23a89a3df Binary files /dev/null and b/images/testing/codacy/codacy-add-repo.png differ diff --git a/images/testing/codacy/codacy-create-api-token.png b/images/testing/codacy/codacy-create-api-token.png new file mode 100644 index 000000000..fa2eedaa3 Binary files /dev/null and b/images/testing/codacy/codacy-create-api-token.png differ diff --git a/images/testing/codacy/codacy-pipeline.png b/images/testing/codacy/codacy-pipeline.png new file mode 100644 index 000000000..0acf883bd Binary files /dev/null and b/images/testing/codacy/codacy-pipeline.png differ diff --git a/images/testing/codacy/codacy-report.png b/images/testing/codacy/codacy-report.png new file mode 100644 index 000000000..5b185fe21 Binary files /dev/null and b/images/testing/codacy/codacy-report.png differ diff --git a/images/testing/codacy/codacy-variable.png b/images/testing/codacy/codacy-variable.png new file mode 100644 index 000000000..7dc5fc445 Binary files /dev/null and b/images/testing/codacy/codacy-variable.png differ diff --git a/images/testing/codacy/create-api-token.png b/images/testing/codacy/create-api-token.png new file mode 100644 index 000000000..0eca544df Binary files /dev/null and b/images/testing/codacy/create-api-token.png differ diff --git a/images/testing/codacy/create-codacy-pipeline.png 
b/images/testing/codacy/create-codacy-pipeline.png new file mode 100644 index 000000000..1fe4c8c4f Binary files /dev/null and b/images/testing/codacy/create-codacy-pipeline.png differ diff --git a/images/testing/codacy/file-analysis.png b/images/testing/codacy/file-analysis.png new file mode 100644 index 000000000..5324f4c27 Binary files /dev/null and b/images/testing/codacy/file-analysis.png differ diff --git a/images/testing/codecov/analysis-report.png b/images/testing/codecov/analysis-report.png new file mode 100644 index 000000000..4c2dfdc59 Binary files /dev/null and b/images/testing/codecov/analysis-report.png differ diff --git a/images/testing/codecov/codecov-interface.png b/images/testing/codecov/codecov-interface.png new file mode 100644 index 000000000..bb0044559 Binary files /dev/null and b/images/testing/codecov/codecov-interface.png differ diff --git a/images/testing/codecov/codecov-pipeline.png b/images/testing/codecov/codecov-pipeline.png new file mode 100644 index 000000000..72b0efd12 Binary files /dev/null and b/images/testing/codecov/codecov-pipeline.png differ diff --git a/images/testing/codecov/codecov-report-details.png b/images/testing/codecov/codecov-report-details.png new file mode 100644 index 000000000..e45890591 Binary files /dev/null and b/images/testing/codecov/codecov-report-details.png differ diff --git a/images/testing/codecov/codecov-report.png b/images/testing/codecov/codecov-report.png new file mode 100644 index 000000000..dbb9260de Binary files /dev/null and b/images/testing/codecov/codecov-report.png differ diff --git a/images/testing/compositions/972337d-codefresh_compose_by_url.png b/images/testing/compositions/972337d-codefresh_compose_by_url.png new file mode 100644 index 000000000..770270099 Binary files /dev/null and b/images/testing/compositions/972337d-codefresh_compose_by_url.png differ diff --git a/images/testing/compositions/add-composition-first.png b/images/testing/compositions/add-composition-first.png new file mode 100644 index 000000000..fb6158221 Binary files /dev/null and b/images/testing/compositions/add-composition-first.png differ diff --git a/images/testing/compositions/compose-from-template-edit.png b/images/testing/compositions/compose-from-template-edit.png new file mode 100644 index 000000000..f56d592b1 Binary files /dev/null and b/images/testing/compositions/compose-from-template-edit.png differ diff --git a/images/testing/compositions/compose-from-template-select-template.png b/images/testing/compositions/compose-from-template-select-template.png new file mode 100644 index 000000000..d06faf3e4 Binary files /dev/null and b/images/testing/compositions/compose-from-template-select-template.png differ diff --git a/images/testing/compositions/composition-file-in-repo.png b/images/testing/compositions/composition-file-in-repo.png new file mode 100644 index 000000000..de975f2fc Binary files /dev/null and b/images/testing/compositions/composition-file-in-repo.png differ diff --git a/images/testing/compositions/composition-launch-button.png b/images/testing/compositions/composition-launch-button.png new file mode 100644 index 000000000..7d7037655 Binary files /dev/null and b/images/testing/compositions/composition-launch-button.png differ diff --git a/images/testing/compositions/composition-launch-log.png b/images/testing/compositions/composition-launch-log.png new file mode 100644 index 000000000..eb510c5d3 Binary files /dev/null and b/images/testing/compositions/composition-launch-log.png differ diff --git 
a/images/testing/compositions/composition-list.png b/images/testing/compositions/composition-list.png new file mode 100644 index 000000000..e100f4369 Binary files /dev/null and b/images/testing/compositions/composition-list.png differ diff --git a/images/testing/compositions/composition-method.png b/images/testing/compositions/composition-method.png new file mode 100644 index 000000000..b17f91d37 Binary files /dev/null and b/images/testing/compositions/composition-method.png differ diff --git a/images/testing/compositions/composition-name.png b/images/testing/compositions/composition-name.png new file mode 100644 index 000000000..d313c9712 Binary files /dev/null and b/images/testing/compositions/composition-name.png differ diff --git a/images/testing/compositions/empty-composition.png b/images/testing/compositions/empty-composition.png new file mode 100644 index 000000000..72b8f1dce Binary files /dev/null and b/images/testing/compositions/empty-composition.png differ diff --git a/images/testing/compositions/environment-running.png b/images/testing/compositions/environment-running.png new file mode 100644 index 000000000..32a81245b Binary files /dev/null and b/images/testing/compositions/environment-running.png differ diff --git a/images/testing/compositions/existing-composition.png b/images/testing/compositions/existing-composition.png new file mode 100644 index 000000000..2afa2bdf5 Binary files /dev/null and b/images/testing/compositions/existing-composition.png differ diff --git a/images/testing/compositions/path-to-docker-compose.png b/images/testing/compositions/path-to-docker-compose.png new file mode 100644 index 000000000..f9414049a Binary files /dev/null and b/images/testing/compositions/path-to-docker-compose.png differ diff --git a/images/testing/compositions/replace-build.png b/images/testing/compositions/replace-build.png new file mode 100644 index 000000000..3041fa53d Binary files /dev/null and b/images/testing/compositions/replace-build.png differ diff --git a/images/testing/compositions/share-environment-link.png b/images/testing/compositions/share-environment-link.png new file mode 100644 index 000000000..c5998d85e Binary files /dev/null and b/images/testing/compositions/share-environment-link.png differ diff --git a/images/testing/coveralls/add-repository.png b/images/testing/coveralls/add-repository.png new file mode 100644 index 000000000..a1534759f Binary files /dev/null and b/images/testing/coveralls/add-repository.png differ diff --git a/images/testing/coveralls/coveralls-coverage.png b/images/testing/coveralls/coveralls-coverage.png new file mode 100644 index 000000000..85f8547ef Binary files /dev/null and b/images/testing/coveralls/coveralls-coverage.png differ diff --git a/images/testing/coveralls/coveralls-pipeline.png b/images/testing/coveralls/coveralls-pipeline.png new file mode 100644 index 000000000..fd319891b Binary files /dev/null and b/images/testing/coveralls/coveralls-pipeline.png differ diff --git a/images/testing/coveralls/coveralls-sample-app.png b/images/testing/coveralls/coveralls-sample-app.png new file mode 100644 index 000000000..a2ee18243 Binary files /dev/null and b/images/testing/coveralls/coveralls-sample-app.png differ diff --git a/images/testing/coveralls/coveralls-specific-report.png b/images/testing/coveralls/coveralls-specific-report.png new file mode 100644 index 000000000..1a71ffe22 Binary files /dev/null and b/images/testing/coveralls/coveralls-specific-report.png differ diff --git 
a/images/testing/coveralls/create-coveralls-pipeline.png b/images/testing/coveralls/create-coveralls-pipeline.png new file mode 100644 index 000000000..ca986dc8e Binary files /dev/null and b/images/testing/coveralls/create-coveralls-pipeline.png differ diff --git a/images/testing/dynamic-preview-environment.png b/images/testing/dynamic-preview-environment.png new file mode 100644 index 000000000..8a2994f5f Binary files /dev/null and b/images/testing/dynamic-preview-environment.png differ diff --git a/images/testing/integration-testing/complex-tests.png b/images/testing/integration-testing/complex-tests.png new file mode 100644 index 000000000..058340bf8 Binary files /dev/null and b/images/testing/integration-testing/complex-tests.png differ diff --git a/images/testing/integration-testing/complex-tests.svg b/images/testing/integration-testing/complex-tests.svg new file mode 100644 index 000000000..01a6f74d5 --- /dev/null +++ b/images/testing/integration-testing/complex-tests.svg @@ -0,0 +1,564 @@ [SVG source; diagram labels: build Test image; sh ./my-tests.sh; my-web-app; Codefresh Pipeline; hostname: my_postgres; hostname: frontend, port: 8080; hostname: redis_ds; my-backend; hostname: backend, port: 9000] diff --git a/images/testing/integration-testing/from-source-code.png b/images/testing/integration-testing/from-source-code.png new file mode 100644 index 000000000..f62f82dee Binary files /dev/null and b/images/testing/integration-testing/from-source-code.png differ diff --git a/images/testing/integration-testing/from-source-code.svg b/images/testing/integration-testing/from-source-code.svg new file mode 100644 index 000000000..36d859a92 --- /dev/null +++ b/images/testing/integration-testing/from-source-code.svg @@ -0,0 +1,359 @@ [SVG source; diagram labels: npm install; npm test; node:9-alpine; Codefresh Pipeline; hostname: my_redis; Redis] diff --git a/images/testing/integration-testing/multi-scope.png b/images/testing/integration-testing/multi-scope.png new file mode 100644 index 000000000..476c779aa Binary files /dev/null and b/images/testing/integration-testing/multi-scope.png differ diff --git a/images/testing/integration-testing/multi-scope.svg b/images/testing/integration-testing/multi-scope.svg new file mode 100644 index 000000000..4a336470d --- /dev/null +++ b/images/testing/integration-testing/multi-scope.svg @@ -0,0 +1,428 @@ [SVG source; diagram labels: compile backend; API testing; Test Env; Codefresh Pipeline; Composition; Fuzzy testing; GUI testing] diff --git a/images/testing/integration-testing/scope.svg b/images/testing/integration-testing/scope.svg new file mode 100644 index 000000000..740071a30 --- /dev/null +++ b/images/testing/integration-testing/scope.svg @@ -0,0 +1,426 @@ [SVG source; diagram labels: npm install; npm test; node:9-alpine; my-node:1.0; Codefresh Pipeline; Docker Build Context; /codefresh/volume; store node_modules; read node_modules] diff --git a/images/testing/integration-testing/single-scope.png
b/images/testing/integration-testing/single-scope.png new file mode 100644 index 000000000..7ed58805b Binary files /dev/null and b/images/testing/integration-testing/single-scope.png differ diff --git a/images/testing/integration-testing/single-scope.svg b/images/testing/integration-testing/single-scope.svg new file mode 100644 index 000000000..7fce00e3e --- /dev/null +++ b/images/testing/integration-testing/single-scope.svg @@ -0,0 +1,423 @@ [SVG source; diagram labels: compile backend; backend tests; Env A; Codefresh Pipeline; Composition A; compile frontend; frontend tests; Env B; Composition B] diff --git a/images/testing/integration-testing/special-image.png b/images/testing/integration-testing/special-image.png new file mode 100644 index 000000000..1294febfa Binary files /dev/null and b/images/testing/integration-testing/special-image.png differ diff --git a/images/testing/integration-testing/special-image.svg b/images/testing/integration-testing/special-image.svg new file mode 100644 index 000000000..6cb5f8dd6 --- /dev/null +++ b/images/testing/integration-testing/special-image.svg @@ -0,0 +1,432 @@ [SVG source; diagram labels: build Test image; sh ./my-tests.sh; my-web-app; Codefresh Pipeline; hostname: my_postgres; hostname: app] diff --git a/images/testing/integration-testing/to-app.png b/images/testing/integration-testing/to-app.png new file mode 100644 index 000000000..061256f0b Binary files /dev/null and b/images/testing/integration-testing/to-app.png differ diff --git a/images/testing/integration-testing/to-app.svg b/images/testing/integration-testing/to-app.svg new file mode 100644 index 000000000..76a858536 --- /dev/null +++ b/images/testing/integration-testing/to-app.svg @@ -0,0 +1,416 @@ [SVG source; diagram labels: mvn package; mvn integration-test; my-java-app; Codefresh Pipeline; hostname: my_postgres; Redis; hostname: app] diff --git a/images/testing/security-scanning/.keep b/images/testing/security-scanning/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/images/testing/security-scanning/aqua-scan.png b/images/testing/security-scanning/aqua-scan.png new file mode 100644 index 000000000..a5562f18d Binary files /dev/null and b/images/testing/security-scanning/aqua-scan.png differ diff --git a/images/testing/security-scanning/clair-scan.png b/images/testing/security-scanning/clair-scan.png new file mode 100644 index 000000000..8729f4c66 Binary files /dev/null and b/images/testing/security-scanning/clair-scan.png differ diff --git a/images/testing/security-scanning/security-annotations.png b/images/testing/security-scanning/security-annotations.png new file mode 100644 index 000000000..1c52099c6 Binary files /dev/null and b/images/testing/security-scanning/security-annotations.png differ diff --git a/images/testing/security-scanning/security-test-results.png b/images/testing/security-scanning/security-test-results.png new file mode 100644 index 000000000..5b1bd6916 Binary files /dev/null and b/images/testing/security-scanning/security-test-results.png differ diff --git a/images/testing/security-scanning/snyk-test-report.png
b/images/testing/security-scanning/snyk-test-report.png new file mode 100644 index 000000000..8316e955f Binary files /dev/null and b/images/testing/security-scanning/snyk-test-report.png differ diff --git a/images/testing/sonarqube/analysis-log.png b/images/testing/sonarqube/analysis-log.png new file mode 100644 index 000000000..328da43c0 Binary files /dev/null and b/images/testing/sonarqube/analysis-log.png differ diff --git a/images/testing/sonarqube/codefresh-yaml-sonar.png b/images/testing/sonarqube/codefresh-yaml-sonar.png new file mode 100644 index 000000000..863cf1013 Binary files /dev/null and b/images/testing/sonarqube/codefresh-yaml-sonar.png differ diff --git a/images/testing/sonarqube/generate-token.png b/images/testing/sonarqube/generate-token.png new file mode 100644 index 000000000..9b5b336e1 Binary files /dev/null and b/images/testing/sonarqube/generate-token.png differ diff --git a/images/testing/sonarqube/simplified-codefresh-pipeline.png b/images/testing/sonarqube/simplified-codefresh-pipeline.png new file mode 100644 index 000000000..83e6fcd60 Binary files /dev/null and b/images/testing/sonarqube/simplified-codefresh-pipeline.png differ diff --git a/images/testing/sonarqube/sonar-analysis-details.png b/images/testing/sonarqube/sonar-analysis-details.png new file mode 100644 index 000000000..8a913e0b5 Binary files /dev/null and b/images/testing/sonarqube/sonar-analysis-details.png differ diff --git a/images/testing/sonarqube/sonar-instructions.png b/images/testing/sonarqube/sonar-instructions.png new file mode 100644 index 000000000..9f33a8fa9 Binary files /dev/null and b/images/testing/sonarqube/sonar-instructions.png differ diff --git a/images/testing/sonarqube/sonar-project.png b/images/testing/sonarqube/sonar-project.png new file mode 100644 index 000000000..712783c02 Binary files /dev/null and b/images/testing/sonarqube/sonar-project.png differ diff --git a/images/testing/sonarqube/sonarqube-logo.png b/images/testing/sonarqube/sonarqube-logo.png new file mode 100644 index 000000000..568be52c5 Binary files /dev/null and b/images/testing/sonarqube/sonarqube-logo.png differ diff --git a/images/testing/unit-testing/unit-tests-in-dockerfile.png b/images/testing/unit-testing/unit-tests-in-dockerfile.png new file mode 100644 index 000000000..0704ce18d Binary files /dev/null and b/images/testing/unit-testing/unit-tests-in-dockerfile.png differ diff --git a/images/testing/unit-testing/unit-tests-with-app-image.png b/images/testing/unit-testing/unit-tests-with-app-image.png new file mode 100644 index 000000000..46cd81fd1 Binary files /dev/null and b/images/testing/unit-testing/unit-tests-with-app-image.png differ diff --git a/images/testing/unit-testing/unit-tests-with-dedicated-image.png b/images/testing/unit-testing/unit-tests-with-dedicated-image.png new file mode 100644 index 000000000..2346cebe1 Binary files /dev/null and b/images/testing/unit-testing/unit-tests-with-dedicated-image.png differ diff --git a/images/testing/unit-testing/unit-tests-with-external-image.png b/images/testing/unit-testing/unit-tests-with-external-image.png new file mode 100644 index 000000000..6d7504a99 Binary files /dev/null and b/images/testing/unit-testing/unit-tests-with-external-image.png differ