diff --git a/_data/home-content.yml b/_data/home-content.yml index 18f73ad1..4fdd3592 100644 --- a/_data/home-content.yml +++ b/_data/home-content.yml @@ -1,140 +1,147 @@ -- title: Getting Started - icon: images/home-icons/started.svg - url: '' - links: - - title: Introducing Codefresh - localurl: /docs/getting-started/csdp-introduction/ - - title: Quick start - localurl: /docs/getting-started/quick-start/ - - title: Concepts - localurl: /docs/getting-started/main-concepts/ - - title: Entity model - localurl: /docs/getting-started/entity-model/ - - title: Architecture - localurl: /docs/getting-started/architecture/ - - title: GitOps approach - localurl: /docs/getting-started/gitops/ - - title: Frequently asked questions - localurl: /docs/getting-started/faq/ -- title: Clients - icon: images/home-icons/client.svg + +- title: Example catalog + icon: images/home-icons/tutorial.svg url: '' links: - - title: Codefresh CLI - localurl: /docs/clients/csdp-cli/ + - title: CI/CD examples for pipelines + localurl: /docs/example-catalog/examples/ + - title: CI examples + localurl: /docs/example-catalog/ci-examples + - title: CD examples + localurl: /docs/example-catalog/cd-examples + - title: GitOps examples + localurl: /docs/example-catalog/gitops-examples -- title: Installation - icon: images/home-icons/runtimes.svg + + + +- title: Deployments + icon: images/home-icons/deployment.svg url: '' links: - - title: Installation environments - localurl: /docs/runtime/installation-options/ - - title: Set up a hosted runtime environment - localurl: /docs/runtime/hosted-runtime/ - - title: Hybrid runtime requirements - localurl: /docs/runtime/requirements/ - - title: Install hybrid runtimes - localurl: /docs/runtime/installation - - title: Manage provisioned runtimes - localurl: /docs/runtime/monitor-manage-runtimes/ - - title: Monitor provisioned hybrid runtimes - localurl: /docs/runtime/monitoring-troubleshooting/ - - title: Add external clusters to runtimes - localurl: /docs/runtime/managed-cluster/ - - title: Add Git Sources to runtimes - localurl: /docs/runtime/git-sources/ + - title: Deployment options for Kubernetes + localurl: /docs/deployments/kubernetes/deployment-options-to-kubernetes/ + - title: Managing Kubernetes clusters + localurl: /docs/deployments/kubernetes/manage-kubernetes/ + - title: Using Helm in Codefresh pipelines + localurl: /docs/deployments/helm/using-helm-in-codefresh-pipeline/ + - title: Managing Helm releases + localurl: /docs/deployments/helm/helm-releases-management/ + - title: Promoting Helm environments + localurl: /docs/deployments/helm/helm-environment-promotion/ + - title: Creating GitOps applications + localurl: /docs/deployment/gitops/create-application/ + - title: Monitoring applications + localurl: /docs/deployment/gitops/applications-dashboard/ + - title: Managing applications + localurl: /docs/deployment/gitops/manage-application/ - title: Pipelines icon: images/home-icons/pipeline.svg url: '' links: - - title: Creation - localurl: /docs/pipelines/create-pipeline + - title: Introduction to Pipelines + localurl: /docs/pipelines/introduction-to-codefresh-pipelines/ + - title: Creating Pipelines + localurl: /docs/pipelines/pipelines/ + - title: Pipeline triggers + localurl: /docs/pipelines/triggers/ + - title: Monitoring pipelines + localurl: /docs/pipelines/monitoring-pipelines/ + - title: Shared Configuration + localurl: /docs/pipelines/shared-configuration/ + - title: Using secrets + localurl: /docs/pipelines/secrets-store/ + - title: Pipeline caching + localurl: 
/docs/pipelines/pipeline-caching/ + - title: Running pipelines locally + localurl: /docs/pipelines/running-pipelines-locally/ + - title: Debugging pipelines + localurl: /docs/pipelines/debugging-pipelines/ + + +- title: Workflows + icon: images/home-icons/pipeline.svg + url: '' + links: + - title: Creating workflows + localurl: /docs/workflows/create-pipeline + - title: Nested workflows + localurl: /docs/workflows/nested-workflows/ - title: Configure artifact repository - localurl: /docs/pipelines/configure-artifact-repository/ + localurl: /docs/workflows/configure-artifact-repository/ - title: Selectors for concurrency synchronization - localurl: /docs/pipelines/concurrency-limit/ + localurl: /docs/workflows/concurrency-limit/ - title: Sharing file systems - localurl: /docs/pipelines/sharing-file-system/ + localurl: /docs/workflows/sharing-file-system/ -- title: Deployment - icon: images/home-icons/deployment.svg - url: '' - links: - - title: Creating applications - localurl: /docs/deployment/create-application/ - - title: Monitoring applications - localurl: /docs/deployment/applications-dashboard/ - - title: Managing applications - localurl: /docs/deployment/manage-application/ - - title: Images in Codefresh - localurl: /docs/deployment/images/ - - title: Install Argo Rollouts - localurl: /docs/deployment/install-argo-rollouts/ -- title: Reports & Insights - icon: images/home-icons/guides.png +- title: Clients + icon: images/home-icons/client.svg url: '' links: - - title: Home dashboard - localurl: /docs/reporting/home-dashboard/ - - title: DORA metrics - localurl: /docs/reporting/dora-metrics/ - - -- title: Image enrichment - icon: images/home-icons/integrations.svg + - title: Codefresh CLI + localurl: /docs/clients/csdp-cli/ + + +- title: Installation + icon: images/home-icons/runtimes.svg url: '' links: - - title: Image enrichment with integrations - localurl: /docs/integrations/image-enrichment-overview/ - - title: Codefresh Classic - localurl: /docs/integrations/ci-integrations/codefresh-classic/ - - title: GitHub Actions - localurl: /docs/integrations/ci-integrations/github-actions/ - - title: Jenkins - localurl: /docs/integrations/ci-integrations/jenkins/ - - title: Jira - localurl: /docs/integrations/issue-tracking/jira/ - - title: Amazon ECR - localurl: /docs/integrations/container-registries/amazon-ecr/ - - title: Docker Hub Registry - localurl: /docs/integrations/container-registries/dockerhub/ - - title: GitHub Container Registry - localurl: /docs/integrations/container-registries/github-cr/ - - title: JFrog Artifactory - localurl: /docs/integrations/container-registries/jfrog/ - - title: Quay Registry - localurl: /docs/integrations/container-registries/quay/ + - title: Installation environments + localurl: /docs/installation/installation-options/ + - title: Codefresh Runner CI/CD installation + localurl: /docs/installation/codefresh-runner/ + - title: On-Premises CI/CD installation + localurl: /docs/installation/codefresh-on-prem/ + - title: On-Premises CI/CD upgrade + localurl: /docs/installation/codefresh-on-prem-upgrade/ + - title: Hosted GitOps Runtime installation + localurl: /docs/installation/hosted-runtime/ + - title: Hybrid GitOps Runtime installation + localurl: /docs/installation/hybrid-gitops/ + - title: Monitoring & managing GitOps Runtimes + localurl: /docs/installation/monitor-manage-runtimes/ + - title: Adding external clusters to GitOps Runtimes + localurl: /docs/installation/managed-cluster/ + - title: Adding Git Sources to GitOps Runtimes + localurl: 
/docs/installation/git-sources/ - title: Administration icon: images/home-icons/administration.svg url: '' links: - - title: Manage users - localurl: /docs/administration/add-users/ + - title: Create a Codefresh account + localurl: /docs/administration/account-user-management/create-codefresh-account/ + - title: Adding users and teams + localurl: /docs/administration/account-user-management/add-users/ - title: Single Sign-On - localurl: /docs/administration/single-sign-on/ + localurl: /docs/single-sign-on/ - title: Set up OAuth2 authentication for Git providers - localurl: /docs/administration/oauth-setup/ - - title: User settings - localurl: /docs/administration/user-settings/ - - title: Access Control - localurl: /docs/administration/access-control/ + localurl: /docs/administration/account-user-management/oauth-setup/ + - title: Access control + localurl: /docs/administration/account-user-management/access-control/ - title: Audit - localurl: /docs/administration/audit/ + localurl: /docs/administration/account-user-management/audit/ - title: Codefresh IP addresses - localurl: /docs/administration/platform-ip-addresses/ + localurl: /docs/administration/account-user-management/platform-ip-addresses/ + - title: User settings + localurl: /docs/administration/user-self-management/user-settings/ + - title: Manage Git PATs + localurl: /docs/administration/user-self-management/manage-pats/ + - title: Reference icon: images/home-icons/guides.png url: '' links: + - title: Runner installation behind firewalls + url: /docs/behind-the-firewall/ - title: Git tokens localurl: /docs/reference/git-tokens/ - title: Secrets @@ -142,12 +149,7 @@ - title: Shared configuration repo localurl: /docs/reference/shared-configuration/ -- title: Troubleshooting - icon: images/home-icons/troubleshooting.svg - url: '' - links: - - title: Runtimes - localurl: /docs/troubleshooting/runtime-issues + diff --git a/_data/nav.yml b/_data/nav.yml index cfb2f70a..681b2dce 100644 --- a/_data/nav.yml +++ b/_data/nav.yml @@ -1,177 +1,358 @@ -- title: Getting started - url: "/getting-started" - pages: - - title: Introducing Codefresh - url: "/csdp-introduction" - - title: Quick start - url: "/quick-start" - sub-pages: - - title: Provision a hosted runtime - url: "/install-hosted" - - title: Prepare for hybrid runtime installation - url: "/verify-requirements" - - title: Install a hybrid runtime - url: "/runtime" - - title: Create an application - url: "/create-app-ui" - - title: Create and commit resources for application - url: "/create-app-specs" - - title: Update the image tag for application - url: "/create-rollout" - - title: Trigger the Hello World example pipeline - url: "/hello-world" - - title: Create a basic CI delivery pipeline - url: "/create-ci-pipeline" - - - - title: Main concepts - url: "/main-concepts" - - title: Entity model - url: "/entity-model" - - title: Architecture - url: "/architecture" - - title: GitOps approach - url: "/gitops" - - title: Frequently asked questions - url: "/faq" -- title: Clients - url: "/clients" + + + +- title: Example catalog + url: "/example-catalog" pages: - - title: Download CLI - url: "/csdp-cli" + - title: "CI/CD examples for pipelines" + url: "/examples" + - title: "CI examples" + url: "/ci-examples" + sub-pages: + - title: Check out Git repositories + url: "/git-checkout" + - title: Custom Git commmands + url: "/git-checkout-custom" + - title: Non-Git checkouts + url: "/non-git-checkout" + - title: Use Git Hash in CI + url: "/get-short-sha-id-and-use-it-in-a-ci-process" + - 
title: Build an Image with the Dockerfile in root directory + url: "/build-an-image-with-the-dockerfile-in-root-directory" + - title: Build an Images specifying Dockerfile Location + url: "/build-an-image-specify-dockerfile-location" + - title: Build an Image from a different Git repository + url: "/build-an-image-from-a-different-git-repository" + - title: Build and push an Image + url: "/build-and-push-an-image" + - title: Build an Image with build arguments + url: "/build-an-image-with-build-arguments" + - title: Share data between steps + url: "/shared-volumes-between-builds" + - title: Upload/download from Google Storage buckets + url: "/uploading-or-downloading-from-gs" + - title: Call other pipelines + url: "/call-child-pipelines" + - title: Run unit tests + url: "/run-unit-tests" + - title: Run integration tests + url: "/run-integration-tests" + - title: Fan-in/fan-out with unit tests + url: "/fan-in-fan-out" + - title: Codecov coverage reports + url: "/codecov-testing" + - title: Coveralls coverage reports + url: "/coveralls-testing" + - title: Codacy coverage reports + url: "/codacy-testing" + - title: Run integration tests with Mongo + url: "/integration-tests-with-mongo" + - title: Run integration tests with MySQL + url: "/integration-tests-with-mysql" + - title: Run integration tests with PostgreSQL + url: "/integration-tests-with-postgres" + - title: Run integration tests with Redis + url: "/integration-tests-with-redis" + - title: Populate a database with existing data + url: "/populate-a-database-with-existing-data" + - title: Share volumes in composition steps + url: "/shared-volumes-of-service-from-composition-step-for-other-yml-steps" + - title: Import data to MongoDB + url: "/import-data-to-mongodb" + - title: Vault Secrets in the Pipeline + url: "/vault-secrets-in-the-pipeline" + - title: Decrypt with Mozilla SOPS + url: "/decryption-with-mozilla-sops" + - title: GitOps secrets + url: "/gitops-secrets" + - title: Launch Composition + url: "/launch-composition" + - title: Use Docker compose + url: "/launching-a-composition-and-defining-a-service-environment-variables-using-a-file" + - title: Send notification to Slack + url: "/sending-the-notification-to-slack" + - title: Send notification to Jira + url: "/sending-the-notification-to-jira" + - title: "CD examples" + url: "/cd-examples" + sub-pages: + - title: Import data to MongoDB + url: "/import-data-to-mongodb" + - title: NodeJS + Angular2 + MongoDB + url: "/nodejs-angular2-mongodb" + - title: Secure a Docker Container Using HTTP Basic Auth + url: "/secure-a-docker-container-using-http-basic-auth" + - title: Spring Boot + Kafka + Zookeeper + url: "/spring-boot-kafka-zookeeper" + - title: Web terminal + url: "/web-terminal" + - title: Trigger a K8s Deployment from a DockerHub Push Event + url: "/trigger-a-k8s-deployment-from-docker-registry" + - title: Deploy to VM + url: "/packer-gcloud" + - title: Deploy to a VM via FTP + url: "/transferring-php-ftp" + - title: Deploy to Tomcat using SCP + url: "/deploy-to-tomcat-via-scp" + - title: Deploy with Helm + url: "/helm" + - title: Deploy with Terraform + url: "/terraform" + - title: Deploy with Pulumi + url: "/pulumi" + - title: Deploy to Nomad + url: "/nomad" + - title: Deploy to Heroku + url: "/deploy-to-heroku" + - title: Use kubectl as part of Freestyle step + url: "/use-kubectl-as-part-of-freestyle-step" + - title: Deploy with Kustomize + url: "/deploy-with-kustomize" + - title: Deploy to Docker Swarm + url: "/docker-swarm" + - title: Amazon ECS/Fargate + url: 
"/amazon-ecs" + - title: Elastic Beanstalk + url: "/elastic-beanstalk" +- title: Deployments + url: "/deployments" + pages: + - title: Kubernetes + url: "/kubernetes" + sub-pages: + - title: Deployment options for Kubernetes + url: "/deployment-options-to-kubernetes" + - title: Managing Kubernetes clusters + url: "/manage-kubernetes" + - title: Custom kubectl commands + url: "/custom-kubectl-commands" + - title: Helm + url: "/helm" + sub-pages: + - title: Using Helm in a Codefresh pipeline + url: "/using-helm-in-codefresh-pipeline" + - title: Managing Helm Releases + url: "/helm-releases-management" + - title: Using managed Helm repos + url: "/managed-helm-repository" + - title: Helm Charts and repositories + url: "/helm-charts-and-repositories" + - title: Custom Helm uploads + url: "/custom-helm-uploads" + - title: Promoting Helm environments + url: "/helm-environment-promotion" + - title: GitOps + url: "/gitops" + sub-pages: + - title: Creating GitOps applications + url: "/create-application" + - title: Monitoring GitOps applications + url: "/applications-dashboard" + - title: Managing GitOps applications + url: "/manage-application" + - title: Progressive delivery with GitOps + url: "/install-argo-rollouts" + - title: Images for GitOps + url: "/images" -- title: Installation - url: "/runtime" +- title: CI pipelines + url: "/pipelines" pages: - - title: Installation environments - url: "/installation-options" - - title: Set up a hosted runtime environment - url: "/hosted-runtime" - - title: Hybrid runtime requirements - url: "/requirements" - - title: Install hybrid runtimes - url: "/installation" - - title: Manage provisioned runtimes - url: "/monitor-manage-runtimes" - - title: Monitor provisioned hybrid runtimes - url: "/monitoring-troubleshooting" - - title: Add external clusters to runtimes - url: "/managed-cluster" - - title: Add Git Sources to runtimes - url: "/git-sources" + - title: Introduction to CI pipelines + url: "/introduction-to-codefresh-pipelines" + - title: Creating a CI pipeline + url: "/pipelines" + - title: Steps in CI pipelines + url: "/steps" + sub-pages: + - title: Git-clone + url: "/git-clone" + - title: Freestyle + url: "/freestyle" + - title: Build + url: "/build" + - title: Push + url: "/push" + - title: Composition + url: "/composition" + - title: Launch-composition + url: "/launch-composition" + - title: Deploy + url: "/deploy" + - title: Approval + url: "/approval" + - title: Conditional execution of steps + url: "/conditional-execution-of-steps" + - title: Post-step operations + url: "/post-step-operations" + - title: Triggers in CI pipelines + url: "/triggers" + sub-pages: + - title: Git triggers + url: "/git-triggers" + - title: DockerHub triggers + url: "/dockerhub-triggers" + - title: Azure triggers + url: "/azure-triggers" + - title: Quay triggers + url: "/quay-triggers" + - title: Helm triggers + url: "/helm-triggers" + - title: Artifactory triggers + url: "/jfrog-triggers" + - title: Timer (Cron) triggers + url: "/cron-triggers" + - title: Variables in CI pipelines + url: "/variables" + - title: Hooks in CI pipelines + url: "/hooks" + - title: Annotations in CI pipelines + url: "/annotations" + - title: Grouping steps into stages + url: "/stages" + - title: Caching for CI pipelines + url: "/pipeline-caching" + - title: Debugging CI pipelines + url: "/debugging-pipelines" + - title: Monitoring CI pipelines + url: "/monitoring-pipelines" + - title: Advanced workflows + url: "/advanced-workflows" + - title: Running CI pipelines locally + url: 
"/running-pipelines-locally" + - title: Configuration for CI pipelines + url: "/configuration" + sub-pages: + - title: Global CI pipeline settings + url: "/pipeline-settings" + - title: Shared configuration + url: "/shared-configuration" + - title: Secrets for CI pipelines + url: "/secrets-store" + - title: Public logs and status badges + url: "/build-status" + - title: Service containers + url: "/service-containers" + - title: Deployment environments + url: "/deployment-environments" + - title: Docker image metadata + url: "/docker-image-metadata" + - title: Pipeline definitions YAML + url: "/what-is-the-codefresh-yaml" -- title: Pipelines - url: "/pipelines" +- title: Workflows + url: "/workflows" pages: - - title: Creation + - title: Creating workflows url: "/create-pipeline" + - title: Nested workflows + url: "/nested-workflows" - title: Configure artifact repository url: "/configure-artifact-repository" - title: Selectors for concurrency synchronization url: "/concurrency-limit" - title: Sharing file systems url: "/sharing-file-system" - - title: Nested workflows - url: "/nested-workflows" - -- title: Deployment - url: "/deployment" - pages: - - title: Creating applications - url: "/create-application" - - title: Monitoring applications - url: "/applications-dashboard" - - title: Managing applications - url: "/manage-application" - - title: Images in Codefresh - url: "/images" - - title: Install Argo Rollouts - url: "/install-argo-rollouts" - -- title: Reports & Insights - url: "/reporting" - pages: - - title: Home dashboard - url: "/home-dashboard" - - title: DORA metrics - url: "/dora-metrics" - -- title: Image enrichment - url: "/integrations" + +- title: Clients + url: "/clients" pages: - - title: Image enrichment with integrations - url: "/image-enrichment-overview" - - title: CI integrations - url: "/ci-integrations" - sub-pages: - - title: Codefresh Classic - url: "/codefresh-classic" - - title: GitHub Actions - url: "/github-actions" - - title: Jenkins - url: "/jenkins" - - title: Issue tracking - url: "/issue-tracking" - sub-pages: - - title: Jira - url: "/jira" - - title: Container registries - url: "/container-registries" - sub-pages: - - title: Amazon ECR - url: "/amazon-ecr" - - title: Docker Hub Registry - url: "/dockerhub" - - title: GitHub Container Registry - url: "/github-cr" - - title: JFrog Artifactory - url: "/jfrog" - - title: Quay Registry - url: "/quay" + - title: Download CLI + url: "/csdp-cli" +- title: Installation + url: "/installation" + pages: + - title: Installation environments + url: "/installation-options" + - title: Runtime architectures + url: "/runtime-architecture" + - title: Codefresh Runner installation + url: "/codefresh-runner" + - title: Hosted GitOps Runtime installation + url: "/hosted-runtime" + - title: Hybrid GitOps Runtime installation + url: "/hybrid-gitops" + - title: On-Premises installation + url: "/codefresh-on-prem" + - title: On-Premises upgrade + url: "/codefresh-on-prem-upgrade" + - title: Monitoring & managing GitOps Runtimes + url: "/monitor-manage-runtimes" + - title: Add external clusters to GitOps Runtimes + url: "/managed-cluster" + - title: Add Git Sources to to GitOps Runtimes + url: "/git-sources" + - title: Administration url: "/administration" pages: - - title: Manage users - url: "/add-users" - - title: User settings - url: "/user-settings" - - title: Set up OAuth2 authentication for Git providers - url: "/oauth-setup" - - title: Authorize access to organizations/projects - url: "/hosted-authorize-orgs" - - title: 
Single Sign-On - url: "/single-sign-on" + - title: Account & user management + url: /account-user-management + sub-pages: + - title: Create a Codefresh account + url: "/create-codefresh-account" + - title: Adding users and teams + url: "/add-users" + - title: Configuring access control + url: "/access-control" + - title: Setting up OAuth2 for Git providers + url: "/oauth-setup" + - title: Authorize access to organizations/projects + url: "/hosted-authorize-orgs" + - title: Auditing actions in Codefresh + url: "/audit" + - title: Codefresh IP addresses + url: "/platform-ip-addresses" + - title: User self-management + sub-pages: + - title: Managing personal settings + url: "/user-settings" + - title: Managing Git PATs + url: "/manage-pats" + +- title: Single Sign-On + url: /single-sign-on + pages: + - title: SSO Overview + url: /sso-overview + - title: Common configuration + url: /team-sync + - title: OpenID Connect + url: /oidc sub-pages: - - title: SAML2 - url: "/sso-setup-saml2" - - title: OpenID Connect - url: "/sso-setup-oauth2" - title: Auth0 - url: "/sso-auth0" + url: /oidc-auth0 - title: Azure - url: "/sso-azure" + url: /oidc-azure - title: Google - url: "/sso-google" - - title: LDAP - url: "/sso-ldap" + url: /oidc-google + - title: Okta + url: /oidc-okta + - title: OneLogin + url: /oidc-onelogin + - title: SAML + url: /saml + sub-pages: + - title: JumpCloud + url: /saml-jumpcloud - title: Okta - url: "/sso-okta" + url: /saml-okta - title: OneLogin - url: "/sso-onelogin" - - title: Access Control - url: "/access-control" - - title: Audit - url: "/audit" - - title: Codefresh IP addresses - url: "/platform-ip-addresses" + url: /saml-onelogin + - title: PingID SSO + url: /saml-pingid + - title: LDAP + url: /ldap + + - title: Reference url: "/reference" @@ -183,18 +364,8 @@ - title: Shared configuration repo url: "/shared-configuration" -- title: What's New? - url: "/whats-new" - pages: - - title: What's new in Codefresh? - url: "/whats-new" -- title: Troubleshooting - url: "/troubleshooting" - pages: - - title: Runtimes - url: "/runtime-issues" - + - title: Terms and Privacy Policy url: "/terms-and-privacy-policy" @@ -205,3 +376,5 @@ url: "/privacy-policy" - title: Service Commitment url: "/sla" + + diff --git a/_docs/administration/access-control.md b/_docs/administration/access-control.md deleted file mode 100644 index 3c55b8cc..00000000 --- a/_docs/administration/access-control.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: "Access control" -description: "" -group: administration -toc: true - ---- -Access control defines the access policy for resources within an enterprise. -In Codefresh, access control to an entity is derived from the entity type, which can be categorized into one of the following: - -* **GitOps-controlled entities** - Entities whose entire lifecyle - creation, update, and deletion, are fully GitOps-controlled. - Examples of such entities in Codefresh include: - * Runtimes - * Git Sources - * Pipelines comprising Argo Workflow/Events resources such as the Workflow Template, Sensor, Event Sources - * Applications comprising Argo CD/Rollouts resources project, Application Set, applications, rollout - -* **Non-GitOps-controlled entities** - - Entities reported to Codefresh as built artifacts, not GitOps-controlled. - - Examples of such entities in Codefresh include: - * Images - -* **Account-configuration entities (currently non-GitOps-controlled)** - - Entities whose state is not currently stored in a Git repository. 
- Examples of such entities in Codefresh include: - - * Account configuration collaborators - * Account configuration security - * Account configuration Single Sign-On (SSO) - * Billing - - -### GitOps-controlled entities -Codefresh stores the state of your account entities according to GitOps principles and policies. - -#### Write permissions -Users with write permissions can access and manage files directly in the Git repository. Any action on the file such as create, update, or delete, is immediately reflected in the user account. - -Any user action via a Codefresh client (UI or CLI), on a GitOps-controlled resource, is impersonated with the user's Git permissions. If the user does not have permissions for an action in Git, then the user is automatically denied access to the same action in a Codefresh client. - -For Codefresh to impersonate the user, the user must provide Git credentials for every runtime. The credentials are securely stored by the Codefresh application proxy. -The Codefresh application proxy uses these credentials: -* For Git-provider operations -* To update Codefresh with the read/write permissions to all existing repositories linked to the Git Source defined for a runtime. The Codefresh client can perform client-side validations. - -To add your Git personal token, in the Codefresh UI, go to your avatar and then select [user settings](https://g.codefresh.io/2.0/user-settings). - -{% include -image.html -lightbox="true" -file="/images/administration/access-control/pat.png" -url="/images/administration/access-control/pat.png" -alt="Add personal access token" -caption="Add personal access token" -max-width="30%" -%} - -#### Read permissions -Codefresh enforces read permissions by checking if the user has Git permissions to view the Kubernetes manifest in the repository. -Read permissions to entities created dynamically from changes in resource state, are inherited from the parent entity's permissions. - -From the user's perspective, this means that: - -* If the user does not have read permissions from the Git provider for the Sensor's Kubernetes manifest, the user does not have visibility into pipelines. - Workflow entities that are dynamically created, derive their read permissions from pipeline permissions. - -* Notifications are displayed only for resources with read permissions. - - -> Currently, we do not enforce Analytics views according to read permissions for pipelines. - -#### Write operations on dynamically-created entities -These are operations users can perform on dynamically-created entities, such as workflows for example. Typically, the permissions for such entities are derived from those of the parent entity. - -Currently, all users with view permissions, can also terminate and retry workflows. - - -### Non-GitOps-controlled entities -For now, users can view all `image` entity types. These are resources reported to Codefresh as built artifacts, but not stored using the GitOps approach. - -### Account-configuration for non-GitOps-controlled entities -All account-configuration entities you have access to are listed in your account settings, and are exposed only to account admins. - -When adding a user account, you can assign the `admin` role to the user. The `admin` role automatically enables all account-configurations. - -### Runtime account-configuration -Runtime configuration is also exposed in the account settings dedicated area and only exposed to admins but is fully controlled via the GitOps approach after installation.
- -Users with write permissions to the runtime installation repository in Git can make changes to the runtime, and create, update, or delete Git Sources defined for that runtime. -We are at present exposing the runtime configuration under the account settings only to account admins. -Be aware though that these can also be changed directly through Git by users who are not admin users in Codefresh.
-
-For now, Codefresh admin users can see all runtimes and Git Sources even if they don't have read permissions to the underlying Git repository.
-
-
-### Upcoming enhancements to access control
-We are working to enhance our access control model by adding another layer to provide the ability to define:
-* Permissions on write operations for entities that are non-GitOps controlled, such as account configuration and workflow operations
-* Read permissions for entities that are completely non-GitOps controlled
-* A more granular permission model for entities that are GitOps-controlled, but without sufficient access control policies in place
-* A more granular permission model for dynamic resources that are non-GitOps controlled, but created from a GitOps-controlled entity, for example, workflows
-
-### What to read next
-[Codefresh architecture](({{site.baseurl}}/docs/getting-started/architecture/))
diff --git a/_docs/administration/account-user-management/access-control.md b/_docs/administration/account-user-management/access-control.md
new file mode 100644
index 00000000..854374cb
--- /dev/null
+++ b/_docs/administration/account-user-management/access-control.md
@@ -0,0 +1,248 @@
+---
+title: "Configuring access control"
+description: "Restrict resources in a company environment"
+group: administration
+sub_group: account-user-management
+redirect_from:
+  - /docs/enterprise/access-control/
+  - /docs/enterprise-account-mng/ent-account-mng/
+  - /docs/enterprise/ent-account-mng/
+  - /docs/administration/ent-account-mng/
+toc: true
+
+---
+
+Codefresh provides several complementary ways for access control within an organization:
+
+* **Role-based access**: [Role-based access](#users-and-administrators) restricts access to parts of the Codefresh UI intended for account administrators. For example, only an account administrator should be able to change integrations with [git providers]({{site.baseurl}}/docs/integrations/git-providers/) and [cloud services]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/).
+
+* **Attribute-based access control (ABAC)**: Policy-based access control via attributes (ABAC) restricts access to [Kubernetes clusters and pipelines](#access-to-kubernetes-clusters-and-pipelines). This option allows account administrators to define exactly which teams have access to which clusters and pipelines. For example, access to production clusters can be granted only to a subset of trusted developers/operators. On the other hand, access to a QA/staging cluster can be less strict.
+
+* **Git-repository access**: Restrict the Git repositories used to load [pipeline definitions](#pipeline-definition-restrictions).
+
+
+## Role-based access for users and administrators
+
+Role-based access, as either a user or an administrator, is usually defined when you [add users to Codefresh accounts]({{site.baseurl}}/docs/administration/add-users/#users-in-codefresh).
+
+> To add users and assign or change user roles, you must be an administrator yourself.
+
+
+{% include
+  image.html
+  lightbox="true"
+  file="/images/administration/users/invite-users.png"
+  url="/images/administration/users/invite-users.png"
+  alt="User roles for access control"
+  caption="User roles for access control"
+  max-width="90%"
+%}
+
+The table below lists the functionality available for role-based access.
+{: .table .table-bordered .table-hover}
+| Functionality | Available for Role |
+| -------------- | -------------- |
+|Run pipelines | `User` and `Admin`|
+|View Docker images | `User` and `Admin`|
+|Inspect text reports | `User` and `Admin`|
+|[Git Integrations]({{site.baseurl}}/docs/integrations/git-providers/) | `Admin`|
+|[External docker registry settings]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) | `Admin`|
+|[External Helm repositories]({{site.baseurl}}/docs/new-helm/add-helm-repository/) | `Admin`|
+|[Cloud provider settings]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/) | `Admin`|
+|[Cloud storage settings]({{site.baseurl}}/docs/testing/test-reports/#connecting-your-storage-account) | `Admin`|
+|[Shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) | `Admin`|
+|[API token generation]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions) | `Admin`|
+|[SSO Settings]({{site.baseurl}}/docs/administration/single-sign-on/) | `Admin`|
+|[Runtime environment selection]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) | `Admin`|
+|[Slack settings]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) | `Admin`|
+|[Audit logs]({{site.baseurl}}/docs/administration/audit-logs/) | `Admin`|
+|ABAC for Kubernetes clusters | `Admin`|
+|Billing and charging | `Admin`|
+
+
+
+## ABAC access control for Kubernetes clusters and pipelines
+
+ABAC (Attribute-Based Access Control) allows fine-grained access to Kubernetes clusters and pipelines. See [ABAC](https://en.wikipedia.org/wiki/Attribute-based_access_control){:target="\_blank"}.
+
+ABAC access control includes:
+
+1. Assigning custom attributes to your Kubernetes clusters
+1. Assigning custom attributes to your pipelines
+1. Defining rules as policies using teams, clusters, and attributes (who, what, where)
+
+
+
+### Add Kubernetes clusters with policy attributes
+
+After adding Kubernetes clusters, you can configure clusters with multiple tags.
+
+Tag names are arbitrary, and can be anything you choose that matches your company process. You can tag your clusters with product names, software lifecycle phases, department names, or any name that helps your security policies.
+
+You can assign multiple tags to each cluster, making it easy to define multiple policies on the same cluster. For example, per project and per team.
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/kubernetes-abac.png"
+  url="/images/administration/access-control/kubernetes-abac.png"
+  alt="Cluster tags"
+  caption="Cluster tags"
+  max-width="70%"
+  %}
+
+**Before you begin**
+* If needed, [add a Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/add-kubernetes-cluster/)
+
+**How to**
+
+1. Expand the provider under which you added the cluster.
+1. Mouse over the cluster to which to add tags or attributes, and then click **Edit tags** on the right.
+   The Tags page displays existing tags if any, and allows you to add multiple tags for a single cluster.
+
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/tagging-kubernetes-clusters.png"
+  url="/images/administration/access-control/tagging-kubernetes-clusters.png"
+  alt="Assigning tags to a cluster"
+  caption="Assigning tags to a cluster"
+  max-width="60%"
+  %}
+1. Click **Add** and type in the tag.
+1. Continue to add tags and when finished, click **Save**.
+
+>By default, all clusters, with and without tags, are displayed and can be edited by all users (but not deleted). As soon as you add at least one tag to a cluster, the cluster is only accessible to users with the required policy rules (explained in the next sections).
+
+### Configure CI pipelines with policy attributes
+
+Similar to Kubernetes clusters, you can also add tags to specific pipelines.
+
+**Before you begin**
+* If needed, [create a CI pipeline]({{site.baseurl}}/docs/pipelines/pipelines/)
+
+**How to**
+
+1. In the Codefresh UI, go to [Pipelines](https://g.codefresh.io/pipelines/all/){:target="\_blank"}.
+1. In the row with the target pipeline, click the context menu for the pipeline, and then select **Edit tags**.
+1. Type in the new tag, press Enter, and continue to add the tags you need.
+1. When finished, click **Save**.
+
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/pipeline-tags.png"
+  url="/images/administration/access-control/pipeline-tags.png"
+  alt="Assigning attributes to a pipeline"
+  caption="Assigning attributes to a pipeline"
+  max-width="80%"
+  %}
+
+
+### Define rules for access control
+Define security rules using the *who, what, where* pattern to control access to clusters and pipelines by departments, projects, roles, etc.
+
+For each rule you define, select:
+1. The team the rule applies to
+1. Cluster privileges (*Create/delete/read/update*) or pipeline privileges (*Create/delete/read/run/update*)
+1. Effective tags
+
+
+**Before you begin**
+* Make sure you have [created at least one team]({{site.baseurl}}/docs/administration/add-users/#create-a-team-in-codefresh)
+
+**How to**
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. On the sidebar, from Access & Collaboration, select [**Permissions**](https://g.codefresh.io/account-admin/permissions/teams){:target="\_blank"}.
+1. For each entity, do the following to define a rule:
+   1. Select the team to which to assign the rule.
+   1. Select the permissions to assign to the team for that entity.
+   1. Select either all clusters with tags (**All tags**) or all clusters that are untagged (**Without tags**).
+
+   {% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/kubernetes-policies.png"
+  url="/images/administration/access-control/kubernetes-policies.png"
+  alt="Kubernetes policies"
+  caption="Kubernetes policies"
+  max-width="80%"
+  %}
+
+### Description of privileges
+
+**For clusters:**
+
+* `Create` - cluster creation requires someone to be an account administrator anyway, so currently this permission isn’t really necessary.
+* `Read` - can only see existing allowed clusters without any ability to change them.
+* `Update` - can see and edit existing allowed cluster resources (which also means performing [installation, removal and rollbacks of Helm charts]({{site.baseurl}}/docs/new-helm/helm-best-practices/)). Tags are managed from account settings, so this permission doesn’t currently apply to tags.
+* `Delete` - cluster removal requires someone to be an account administrator anyway, so currently this permission isn’t really necessary.
+
+**For pipelines:**
+
+* `Create` - can only create new pipelines, not see, edit (which includes tagging them) or delete them. This permission should also go hand in hand with additional permissions like read/edit untagged pipelines.
+* `Read` - view allowed pipelines only.
+* `Update` - see and edit allowed pipelines only (including tagging them).
+* `Delete` - can delete allowed pipelines only.
+* `Run` - can run allowed pipelines only.
+* `Approve` - resume pipelines that are waiting for manual [approval]({{site.baseurl}}/docs/pipelines/steps/approval/).
+* `Debug` - allow the usage of the [pipeline debugger]({{site.baseurl}}/docs/pipelines/debugging-pipelines/).
+
+
+
+## Git-repository access restrictions
+
+By default, users can load pipeline definitions when [creating a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/), from the inline editor, or any private or public Git repository.
+
+You can change the default behavior to restrict loading CI pipeline definitions from specific Git repositories or completely disable loading the definitions from all Git repositories.
+
+### Enable/disable access to pipeline YAMLs by source
+Enable or disable access to pipeline definition YAMLs based on the source of the YAML. These global settings are effective for all pipelines in the account, and enable or disable that method of pipeline creation from the Codefresh UI. You can enable or disable loading pipeline definitions from:
+  * The inline editor in the Codefresh UI: Disabling the inline editor, for example, disables new and _all existing pipelines_
+    with pipeline definitions defined in the Codefresh editor. The Run button is disabled for all such pipelines.
+  * Any Git repository connected to Codefresh
+  * **Any** public URL
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. From Configuration on the sidebar, select [**Pipeline Settings**](https://g.codefresh.io/account-admin/account-conf/pipeline-settings){:target="\_blank"}.
+
+   {% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/pipeline-restrictions.png"
+  url="/images/administration/access-control/pipeline-restrictions.png"
+  alt="Global pipeline restrictions"
+  caption="Global pipeline restrictions"
+  max-width="80%"
+  %}
+
+1. Turn on or off the options as needed.
+1. Continue with [Define access to Git repositories for pipeline YAMLs](#define-access-to-git-repositories-for-pipeline-yamls), described below.
+
+### Define access to Git repositories for pipeline YAMLs
+If access to pipeline definitions is enabled for Git repositories, you can configure fine-grained restrictions through the integrations settings for your [Git provider]({{site.baseurl}}/docs/integrations/git-providers/).
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. From Configuration on the sidebar, select [**Pipeline Integrations**](https://g.codefresh.io/account-admin/account-conf/integration){:target="\_blank"}.
+1. Select the Git provider integration, and then click **Edit**.
+1. Scroll down and expand **YAML Options**.
+
+   {% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/pipeline-git-restrictions.png"
+  url="/images/administration/access-control/pipeline-git-restrictions.png"
+  alt="Pipeline restrictions per Git provider"
+  caption="Pipeline restrictions per Git provider"
+  max-width="80%"
+  %}
+
+{:start="5"}
+1. Configure restrictions for Git repositories that can be used for pipeline definitions:
+   * **Allow only the following repositories**: Toggle **Manual selection** to on, and then select the Git repos, or define a regex according to which to select repos.
+   * **Allow only the following branches**: Select Git repositories by the branches that match the regex. For example, this regex `/^((pipeline-definition)$).*/g` allows users to load pipeline YAMLs only from a branch named `pipeline-definition` in a Git repository.
+   * **Allow only the following paths**: Select Git repositories by folders within the repo that match the glob pattern.
+
+
+
+## Related articles
+[Codefresh installation options]({{site.baseurl}}/docs/administration/installation-security/)
+[Managing your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/)
diff --git a/_docs/administration/account-user-management/add-users.md b/_docs/administration/account-user-management/add-users.md
new file mode 100644
index 00000000..6177e2c3
--- /dev/null
+++ b/_docs/administration/account-user-management/add-users.md
@@ -0,0 +1,115 @@
+---
+title: "Adding users and teams"
+description: "Add users and teams to Codefresh accounts"
+group: administration
+sub_group: account-user-management
+toc: true
+---
+
+Once you have created a Codefresh account, you can add any number of users to collaborate on repositories, workflows, and pipelines, as well as teams of users.
+You can then create teams in Codefresh to group users who share a common denominator, such as the same permissions, access to the same functionality, or roles. Teams make it easy for administrators to both define and manage items shared by multiple users in an organization.
+
+
+## Users in Codefresh
+Adding a user requires assigning a role to define access to account resources, and optionally, selecting an SSO provider for the user:
+
+* **Role**: Defines the user's access level to the resources in the account.
+  * **User**: The default. With this role, users can work with your repositories and pipelines, but cannot change settings
+on clusters, docker registries, git integrations, shared configurations, etc.
+  * **Administrator**: Users with this role have full access to your account and can change all your settings, so make sure that they are trusted colleagues.
+    For guidelines on access control, see [Access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/).
+* **SSO**: By default, SSO is not enabled for users. If required, explicitly select the SSO provider. For an overview of SSO, see [Single Sign on]({{site.baseurl}}/docs/single-sign-on/).
+
+
+### Add a user to a Codefresh account
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. On the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}.
+1. Select **Users**, and then select **+ [Add User]**.
+1. Type the **User's email address**, and click **Invite**.
+
+   The user receives an email invitation, and in the Users list, the username is set to Pending, and status to Resend.
+1. From the **Role** dropdown, select either **User** or **Administrator**.
+1. If SSO is configured for the account, **Select SSO provider**.
+
+
+
+### Manage users in a Codefresh account
+
+Once you add a user to your Codefresh account, you can do the following to manage that user:
+* Resend invitations that are pending acceptance: Select ![](/images/administration/users/icon-Send.png?display=inline-block) **Resend**.
+* Edit the user's email address: Select ![](/images/administration/users/icon-Edit.png?display=inline-block) **Edit**.
+* Change the role: From the **Role** dropdown, select the new role.
+* Change SSO provider: From the **SSO** dropdown, select the new SSO provider.
+* Remove the user account: Select ![](/images/administration/users/icon-Delete.png?display=inline-block) **Delete**.
+
+
+## Teams in Codefresh
+Teams are groups of users who share the same permissions or roles, or any other common denominator defined according to company processes. Teams allow you to enforce access control through ABAC (Attribute Based Access Control).
+By default, there are two teams:
+* Users
+* Admins with users [invited as collaborators]({{site.baseurl}}/docs/accounts/assign-a-user-to-a-team/)
+
+> Only Enterprise customers can add new teams. Other Codefresh plans can only use the predefined *Users* and *Admin* teams. [Contact us](https://codefresh.io/contact-us/){:target="\_blank"} to upgrade to an Enterprise plan.
+
+### Create a team in Codefresh
+
+Create a team in Codefresh and then assign users to the team. You can assign the same user to multiple teams, as in most companies users have overlapping roles.
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **User Management**.
+1. From the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}.
+1. Select **Teams**, and then select **Create a Team**.
+1. Enter the **Team Name**.
+   > The team name can include only lower-case alphanumeric characters and hyphens, without spaces.
+
+   See the screenshot below for some sample team names.
+
+{% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/teams.png"
+  url="/images/administration/access-control/teams.png"
+  alt="Examples of teams in Codefresh"
+  caption="Examples of teams in Codefresh"
+  max-width="80%"
+  %}
+
+### Assign a user to a team
+1. To assign users to the team, do the following:
+   1. Hover over the team name and click the **Settings** icon.
+   1. Click **Invite to team**, type the email address of the user to invite, and then click **Add**.
+1. To change the name of the team, click **Edit** and type the new name.
+
+## Define session timeouts and domain restrictions for user accounts
+As an administrator, you can optionally define session timeouts to automatically log out users who have been inactive for the specified duration, and restrict invitations to specific email domains.
+
+> The maximum duration for inactivity is 30 days. Inactive users are warned 15 minutes before they are logged out.
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. On the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}.
+1. Select **Security**.
+1. For **User Session**, add the timeout duration in minutes/hours/days.
+
+   {% include image.html
+  lightbox="true"
+  file="/images/administration/access-control/security-timeout.png"
+  url="/images/administration/access-control/security-timeout.png"
+  alt="Security timeout"
+  caption="Security timeout"
+  max-width="90%"
+  %}
+
+{:start="5"}
+1. To restrict invitations to specific email domains, in the **Email domains** field below User Invitations, type in the domains to allow, one per line.
+## Troubleshoot add users
+
+* [User is prompted to enter an organization name](https://support.codefresh.io/hc/en-us/articles/360020177959-User-is-prompted-to-enter-an-organization-name)
+* [Account invitation not permitting login](https://support.codefresh.io/hc/en-us/articles/360015251000-Account-invitation-not-permitting-login)
+
+
+
+## Related articles
+[Access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/)
+[Single Sign on]({{site.baseurl}}/docs/single-sign-on/)
+[OAuth authentication for Git providers]({{site.baseurl}}/docs/administration/account-user-management/oauth-setup)
+
diff --git a/_docs/administration/account-user-management/audit.md b/_docs/administration/account-user-management/audit.md
new file mode 100644
index 00000000..0fa2d9d9
--- /dev/null
+++ b/_docs/administration/account-user-management/audit.md
@@ -0,0 +1,111 @@
+---
+title: "Audit logs"
+description: "Get a list of all actions in Codefresh"
+group: administration
+sub_group: account-user-management
+redirect_from:
+  - /docs/enterprise/audit-logs/
+toc: true
+---
+
+Codefresh keeps a log of all actions that happen at all times based on API calls that reach Codefresh.
+The time frames covered by audit logs depend on the pricing tier of your Codefresh account.
+
+The audit log includes:
+* UI actions from users
+* [CLI](https://codefresh-io.github.io/cli/) invocations
+* Any [external integrations]({{site.baseurl}}/docs/integrations/codefresh-api/) used with Codefresh
+
+You can:
+* View, filter, and search for audited events
+* View API payload for an event
+* Download the audit log file in CSV format
+
+## View audit logs
+The Audit Log is divided into actions audited (All Audit), and triggers and webhooks processed by Codefresh (Triggers).
+
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. On the sidebar, from Access & Collaboration, select [**Audit**](https://g.codefresh.io/account-admin/audit/audit-all){:target="\_blank"}.
+1. To focus on a specific time frame, select the date range from the toolbar.
+   The All Audit tab includes all Codefresh events in your account, sorted by the most recent events.
+   Each event shows the following details:
+   * `Entity ID/Name`: The entity that was affected.
+   * `Entity type`: The type of entity on which the action was performed, such as user, team, build, pipeline, project, etc.
+   * `Action`: The action that was taken on the entity.
+   * `Status`: The result of the API call.
+   * `User`: The name of the user who performed the action.
+   * `Last Request`: The time of the event.
+
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/audit-logs.png"
+url="/images/administration/audit/audit-logs.png"
+alt="Audit Logs view"
+caption="Audit Logs view"
+max-width="70%"
+%}
+
+
+The Triggers tab includes all the triggers/webhooks that were processed by Codefresh, with the same information as the Audit tab.
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/audit-triggers.png"
+url="/images/administration/audit/audit-triggers.png"
+alt="Audit Triggers view"
+caption="Audit Triggers view"
+max-width="70%"
+%}
+
+
+Both tabs have built-in paging and filtering.
+
+
+
+### Filter audited events
+
+Filter audited events to focus on a specific entity or user.
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/audit-filter.png"
+url="/images/administration/audit/audit-filter.png"
+alt="Filtering audit actions"
+caption="Filtering audit actions"
+max-width="40%"
+%}
+
+
+### Get more details for audited events
+
+You can get the exact API payload for each event as it was sent to Codefresh, including the URL and other call parameters used for the selected event.
+
+* At the right of the row with the event, click the **More Details** (book) icon.
+
+
+{% include image.html
+lightbox="true"
+file="/images/administration/audit/api-call-details.png"
+url="/images/administration/audit/api-call-details.png"
+alt="API call details for audited event"
+caption="API call details for audited event"
+max-width="40%"
+%}
+
+
+
+## Export audit logs
+
+Export all audited events, both Audits and Triggers, to a `CSV` file, for offline processing with your own tools or for viewing in external applications such as Microsoft Excel.
+
+* On the top right of the toolbar, click **Download Audit**.
+  The downloaded file includes, in addition to the events themselves, the API call information (payload and parameters) for each event.
+
+
+
+## Related articles
+[Codefresh installation options]({{site.baseurl}}/docs/installation/installation-security/)
+[Configuring access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/)
+[Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/)
diff --git a/_docs/administration/account-user-management/create-codefresh-account.md b/_docs/administration/account-user-management/create-codefresh-account.md
new file mode 100644
index 00000000..15824668
--- /dev/null
+++ b/_docs/administration/account-user-management/create-codefresh-account.md
@@ -0,0 +1,219 @@
+---
+title: "Create a Codefresh account"
+description: "Welcome to Codefresh!"
+group: administration
+sub_group: account-user-management
+redirect_from:
+  - /docs/
+  - /docs/create-an-account/
+  - /docs/getting-started/
+  - /docs/getting-started/introduction/
+---
+Before you can do anything in Codefresh, such as building and deploying your applications, you need to create a Codefresh account.
+
+Creating an account in Codefresh is free (no credit card is required) and can be done in three simple steps.
+
+{% include
+image.html
+lightbox="true"
+file="/images/administration/create-account/create-account-steps.png"
+url="/images/administration/create-account/create-account-steps.png"
+alt="Codefresh account creation steps"
+max-width="90%"
+%}
+
+## Step 1: Select your Identity Provider
+As the first step in setting up your account in Codefresh, select the identity provider (IdP) to use.
+Codefresh currently supports the following IdPs:
+* GitHub
+* Bitbucket
+* GitLab
+* Azure
+* Google
+* LDAP
+
+If you need an IdP that is not in the list, please [contact us](https://codefresh.io/contact-us/) with the details.
+
+>NOTES:
+ For Git repositories, the login method is less important, as you can connect Git repositories through [Git integrations]({{site.baseurl}}/docs/integrations/git-providers/), regardless of your sign-up process.
+
+ If you use multiple sign-up methods, as long as you use the same email address in all the sign-ups, Codefresh automatically redirects you to the account dashboard.
+
+1. Go to the [Codefresh Sign Up page](https://g.codefresh.io/signup).
+ + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/select-identity-provider.png" +url="/images/administration/create-account/select-identity-provider.png" +alt="Codefresh sign-up page" +caption="Codefresh sign-up page" +max-width="40%" +%} + +{:start="2"} +1. Select the IdP for sign-up. +1. Continue with [Step 2: Accept the permissions request](#step2-accept-the-permissions-request) + + + +## Step 2: Accept the permissions request + +After you select the IdP (identity provider), Codefresh requests permission to access your basic details, and for Git providers, to access your Git repositories. The Permissions window that is displayed differs according to the IdP selected in the previous step. + +Don't worry, Codefresh will not do anything without your explicit approval, so don't be scared by the permissions shown +in the request window. The permissions requested by Codefresh are needed in order to build and deploy your projects. + +1. Do any of the following: + * For GitHub: To continue, click **Authorize codefresh-io**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/github-authorize.png" +url="/images/administration/create-account/github-authorize.png" +alt="GitHub authorization page" +caption="GitHub authorization page" +max-width="50%" +%} + + * For Bitbucket: To continue, click **Grant access**. + + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/bitbucket-authorize.png" +url="/images/administration/create-account/bitbucket-authorize.png" +alt="Bitbucket authorization page" +caption="Bitbucket authorization page" +max-width="50%" +%} + + * For GitLab: To continue, click **Authorize**. + + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/gitlab-authorize.png" +url="/images/administration/create-account/gitlab-authorize.png" +alt="GitLab authorization page" +caption="GitLab authorization page" +max-width="50%" +%} + + Once you confirm the permissions for your Git provider, Codefresh automatically connects to your Git provider and fetches your basic account details, such as your email. + +{:start="2"} +1. Continue with [Step 3: Verify account details](#step-3-verify-account-details). + +## Step 3: Verify account details + +Verifying account details is the final step in creating your Codefresh account. + +1. Review the details for your new account, make the relevant changes, and click **NEXT**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-signup.png" +url="/images/administration/create-account/codefresh-signup.png" +alt="Codefresh account details" +caption="Codefresh account details" +max-width="40%" +%} + +{:start="2"} +1. Enter a name for your account, and click **NEXT**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-accountname.png" +url="/images/administration/create-account/codefresh-accountname.png" +alt="Codefresh account name" +caption="Codefresh account name" +max-width="40%" +%} + +{:start="3"} +1. Finally, answer the questions to personalize your account and click **FINISH**. + +{% include +image.html +lightbox="true" +file="/images/administration/create-account/codefresh-personalize.png" +url="/images/administration/create-account/codefresh-personalize.png" +alt="Codefresh personalize account" +caption="Codefresh personalize account " +max-width="40%" +%} + +Congratulations! Your new Codefresh account is now ready. 
+
+{% include
+image.html
+lightbox="true"
+file="/images/administration/create-account/codefresh-dashboard.png"
+url="/images/administration/create-account/codefresh-dashboard.png"
+alt="Codefresh dashboard"
+caption="Codefresh dashboard"
+max-width="40%"
+%}
+
+
+
+
+## Related articles
+[Adding users and teams]({{site.baseurl}}/docs/administration/account-user-management/add-users/)
+[Configuring access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/)
+[Codefresh IP addresses]({{site.baseurl}}/docs/administration/account-user-management/platform-ip-addresses/)
+[Create a basic pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/)
+[Pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/)
+[Deploy to Kubernetes]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/)
+
+
diff --git a/_docs/administration/hosted-authorize-orgs.md b/_docs/administration/account-user-management/hosted-authorize-orgs.md
similarity index 100%
rename from _docs/administration/hosted-authorize-orgs.md
rename to _docs/administration/account-user-management/hosted-authorize-orgs.md
diff --git a/_docs/administration/oauth-setup.md b/_docs/administration/account-user-management/oauth-setup.md
similarity index 92%
rename from _docs/administration/oauth-setup.md
rename to _docs/administration/account-user-management/oauth-setup.md
index be8d4e21..632cf4a7 100644
--- a/_docs/administration/oauth-setup.md
+++ b/_docs/administration/account-user-management/oauth-setup.md
@@ -1,7 +1,8 @@
---
-title: "Set up OAuth2 authentication for Git providers"
+title: "Setting up OAuth2 for Git providers"
description: ""
group: administration
+sub_group: account-user-management
toc: true
---

@@ -32,7 +33,7 @@ Codefresh supports OAuth2 or personal access tokens (PATs) for authentication:



-### Authentication for Git providers and runtime accounts
+## Authentication for Git providers and runtime accounts
The [Authentication](https://g.codefresh.io/2.0/account-settings/authentication?providerName=github){:target="\_blank"} page displays the accounts by Git provider and the authentication method selected for the same.

Authentication accounts are organized by Runtimes. A runtime can have a single authentication account.

@@ -50,7 +51,7 @@ The Type column identifies the authentication for the provider account as either

As the account administrator, you can change the authentication method for an account at any time to either Codefresh or Custom, or manual token entry. See [Select authentication mechanism for runtime](#select-authentication-mechanism-for-runtime).

-### Create a custom OAuth2 Application for Git provider
+## Create a custom OAuth2 Application for Git provider
Create a custom OAuth2 Application for Codefresh in your Git provider accounts with the correct scopes, and set up authentication for the same within Codefresh. Users in Codefresh can then authorize access to the Git provider using OAuth2, instead of a personal access token.
Supported Git providers:

@@ -71,7 +72,7 @@ To set up OAuth2 authorization in Codefresh, you must:
{:/} -#### Step 1: Create a custom OAuth2 Application in Git +### Step 1: Create a custom OAuth2 Application in Git Create and register an OAuth App under your organization to authorize Codefresh. 1. Follow the step-by-step instructions for your Git provider: @@ -111,7 +112,7 @@ Create and register an OAuth App under your organization to authorize Codefresh.
{:/} -#### Step 2: Create a K8s secret resource in the runtime cluster +### Step 2: Create a K8s secret resource in the runtime cluster Create a K8s secret in the runtime cluster, using the example below as a guideline. You must define the application ID (`appId`), client ID (`clientId`) and the client secret (`clientSecret`) from the OAuth2 Application you created in your Git provider, and the Git URL (`url`). > All fields in the secret _must be_ encoded in `base64`. @@ -154,7 +155,7 @@ data:
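The example manifest itself is elided by the diff hunk above. Purely as an illustrative sketch (the secret name and namespace below are placeholders, and any metadata or labels your runtime expects are omitted), a secret with these fields can be created with `kubectl`, which `base64`-encodes literal values for you:

```
# Placeholder name and namespace; the data keys (appId, clientId, clientSecret, url) are the ones described above.
kubectl create secret generic git-provider-oauth-app \
  --namespace <runtime-namespace> \
  --from-literal=appId=<oauth2-application-id> \
  --from-literal=clientId=<oauth2-client-id> \
  --from-literal=clientSecret=<oauth2-client-secret> \
  --from-literal=url=https://github.com
```

For the exact resource name, metadata, and labels, follow the manifest shown in this step; the command above only illustrates how the values end up `base64`-encoded under `data`.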
{:/} -#### Step 3: Configure OAuth2 settings for Custom Application in Codefresh +### Step 3: Configure OAuth2 settings for Custom Application in Codefresh Configure the settings for the Custom OAuth2 Application in Codefresh. Configuring the settings creates a K8s ConfigMap that references the OAuth secret credentials. When configuring the settings, you can work in Form mode, or directly update the YAML manifest. @@ -210,9 +211,16 @@ Configure the settings for the Custom OAuth2 Application in Codefresh. Configuri You have completed the setup to authorize Codefresh as an OAuth App for your Git provider. -### Select authentication mechanism for runtime +## Select authentication mechanism for runtime For a Git provider and a runtime account, select the authentication mechanism: Codefresh account, Custom provider account if one exists, or token-based authentication. 1. In the Codefresh UI, go to [Authentication](https://g.codefresh.io/2.0/account-settings/authentication?providerName=github){:target="\_blank"}. 1. Select the runtime, and click ![](/images/administration/users/icon-Edit.png?display=inline-block) **Edit**. -1. Select the OAuth authentication provider account. \ No newline at end of file +1. Select the OAuth authentication provider account. + + +## Related articles +[Adding users and teams]({{site.baseurl}}/_docs/administration/account-user-management/add-users/) +[Configuring access control]({{site.baseurl}}/docs/administration/account-user-management/access-control/) +[Codefresh IP addresses]({{site.baseurl}}/docs/administration/account-user-management/platform-ip-addresses/) + \ No newline at end of file diff --git a/_docs/administration/platform-ip-addresses.md b/_docs/administration/account-user-management/platform-ip-addresses.md similarity index 64% rename from _docs/administration/platform-ip-addresses.md rename to _docs/administration/account-user-management/platform-ip-addresses.md index 2676689f..bb818bc1 100644 --- a/_docs/administration/platform-ip-addresses.md +++ b/_docs/administration/account-user-management/platform-ip-addresses.md @@ -2,17 +2,18 @@ title: "Codefresh IP addresses" description: " " group: administration +sub_group: account-user-management toc: true --- Access to Kubernetes clusters behind strict firewalls not accessible from the public internet is governed through authorized IP addresses. Codefresh provides a list of IP addresses to be configured on clusters to allow access to them. -You can register multiple external clusters to Codefresh runtimes, both hosted and hybrid. All runtimes require Codefresh platform IPs to be configured on the clusters. -In addition, managed clusters registered to hosted runtimes must be configured with a set of specific IP addresses to authorize access. +You can register multiple external clusters to the Codefresh Runner and GitOps Runtimes. All Runtimes require Codefresh platform IPs to be configured on the clusters. +In addition, managed clusters registered to Hosted GitOps Runtimes must be configured with a set of specific IP addresses to authorize access. -### Codefresh platform IPs (updated July 31st 2021) +## Codefresh platform IPs (updated July 31st 2021) All the IPs are NAT gateways, and need to enable specific IPs instead of ranges. @@ -38,13 +39,13 @@ All the IPs are NAT gateways, and need to enable specific IPs instead of ranges. 
- 44.238.167.159 - 44.237.63.217 -### Codefresh IPs for managed clusters in hosted runtimes +## Codefresh IPs for Hosted GitOps Runtimes - 34.207.5.18 - 34.232.79.230 - 44.193.43.5 -### Define API access to IPs for clusters +## API access to IPs for clusters Clusters must be configured with API access to the authorized Codefresh IPs. If you haven't configured your clusters with the required IPs, use the links below to complete the configuration for the clusters listed: @@ -54,7 +55,8 @@ If you haven't configured your clusters with the required IPs, use the links bel [GKE (Google Kubernetes Engine)](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters){:target="\_blank"} -### What to read next -[Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/) -[Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/) \ No newline at end of file +## What to read next +[Codefresh Runner installation]({{site.baseurl}}/docs/installation/codefresh-runner/) +[Set up a Hosted GitOps Runtime]({{site.baseurl}}/docs/installation/hosted-runtime/) +[Install Hybrid GitOps Runtimes]({{site.baseurl}}/docs/runtime/hybrid-gitops/) + \ No newline at end of file diff --git a/_docs/administration/add-users.md b/_docs/administration/add-users.md deleted file mode 100644 index be103373..00000000 --- a/_docs/administration/add-users.md +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: "Manage users" -description: "" -group: administration -toc: true ---- - -If you have a Codefresh account, you can add any number of users to collaborate on repositories, workflows, and pipelines. - -Adding a user requires assigning a role, and optionally, an SSO provider: - -**Role**: Defines the user's access level, and is by default set to User. The Administrator role has full access to account settings. -For guidelines on access control, see [Access control]({{site.baseurl}}/docs/administration/access-control/). - -**SSO**: By default, SSO is not enabled for users. If required, explicitly select the SSO provider. For an overview of SSO, see [Single Sign on]({{site.baseurl}}/docs/administration/single-sign-on/). - -### Add a user -1. In Codefresh, click **Account Settings**. -1. From the sidebar, select [Collaboration](https://g.codefresh.io/2.0/account-settings/users){:target="\_blank"}. - - {% include - image.html - lightbox="true" - file="/images/administration/users/users-list.png" - url="/images/administration/users/users-list.png" - alt="Users list" - caption="Users list" - max-width="40%" - %} - -{:start="3"} -1. Select **Users**, and then select **+ [Add User]**. - - {% include - image.html - lightbox="true" - file="/images/administration/users/invite-user.png" - url="/images/administration/users/invite-user.png" - alt="Add new user" - caption="Add new user" - max-width="40%" - %} - - 1. Type the **User's email address**. - 1. **Assign a role**, by selecting either **User** or **Administrator**. - 1. If SSO is configured for the account, **Select SSO provider**. - - -The user receives an email invitation, and the Users page is updated with information on the user. -The Status column shows Invite sent until the user accepts the invitation, when the user account is created. - -> For invitations pending accept, select ![](/images/administration/users/icon-Send.png?display=inline-block) **Resend invite**. 
- To edit account settings, select ![](/images/administration/users/icon-Edit.png?display=inline-block) **Edit**. - To remove the user account, select ![](/images/administration/users/icon-Delete.png?display=inline-block) **Delete**. - - -### Troubleshoot add users -[User is prompted to enter an organization name](https://support.codefresh.io/hc/en-us/articles/360020177959-User-is-prompted-to-enter-an-organization-name) -[Account invitation not permitting login](https://support.codefresh.io/hc/en-us/articles/360015251000-Account-invitation-not-permitting-login) - diff --git a/_docs/administration/audit.md b/_docs/administration/audit.md deleted file mode 100644 index eca08759..00000000 --- a/_docs/administration/audit.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Audit" -description: "" -group: administration -toc: true ---- - -Most entities in Codefresh are GitOps-compliant, and fully controlled via the GitOps approach. - -For information on which entities and how they are controlled, review [access control]({{site.baseurl}}/docs/administration/access-control/). - -Audit logs are available for GitOps-compliant entities. - -View audit logs: - -* Of Git Sources, in the **Notifications** panel -* Of pipeline entities, in the **Update History** tab -* In your Git repository - -### Git Source changes in Notifications -The **Notifications** panel is a pull-down panel, always available in the Codefresh toolbar. The panel shows a recent view of changes to entities such as Git Sources. - - -{% include -image.html -lightbox="true" -file="/images/administration/audit/notifications.png" -url="/images/administration/audit/notifications.png" -alt="Git Sources change log in Notifications" -caption="Git Sources change log in Notifications" -max-width="30%" -%} - -### Pipeline entity changes in Update History -When you drill down into a pipeline, the **Update History** tab shows the list of changes to all its underlying entities. - -{% include -image.html -lightbox="true" -file="/images/administration/audit/update-history.png" -url="/images/administration/audit/update-history.png" -alt="Pipeline entity change log in Update History" -caption="Pipeline entity change log in Update History" -max-width="30%" -%} - - -### Git repo change log - -A change to a GitOps-controlled resource in Codefresh is made by Codefresh impersonating and pushing commits to your Git Sources. -The Git repository linked to the Git Source shows all the commits. - - -### (Future) Centralized audit log in account settings -We plan to create a centralized location from which to view all API operations. - diff --git a/_docs/administration/single-sign-on/sso-azure.md b/_docs/administration/single-sign-on/sso-azure.md deleted file mode 100644 index 95e790fc..00000000 --- a/_docs/administration/single-sign-on/sso-azure.md +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: "Azure Single Sign-On (SSO)" -description: " " -group: administration -sub_group: single-sign-on -redirect_from: - - /docs/enterprise/single-sign-on/sso-azure/ -toc: true ---- - -Setting up SSO for Azure in Codefresh, requires you to register Codefresh in Azure AD with the required permissions and the client secret, configure the SSO settings in Codefresh, and then define the Client ID in Azure AD. -For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). - -### Prerequisites -* Azure user roles: *Application Administrator* or *Global Administrator* roles. 
- These roles are required after the SSO integration is complete to [sync teams from Azure to Codefresh]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). - - -### Register Codefresh in Azure AD -Register the Codefresh application in Azure AD. - -1. Log in to **Azure Portal**, and from the sidebar, select **Azure Active Directory**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/1-azure-service.png" - url="/images/administration/sso/azure/1-azure-service.png" - alt="Azure Active Directory" - caption="Azure Active Directory" - max-width="30%" - %} - -{:start="2"} -1. From the sidebar, select **App registrations**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/2-app-registrations.png" - url="/images/administration/sso/azure/2-app-registrations.png" - alt="Azure App Registrations" - caption="Azure App Registrations" - max-width="30%" - %} - -{:start="3"} -1. To add the new application, select **+ New registration**. - Enter a name for the application, e.g. Codefresh, and for all other options, retain default settings. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/3-register-an-app.png" - url="/images/administration/sso/azure/3-register-an-app.png" - alt="Azure App Registration creation" - caption="Azure App Registration creation" - max-width="30%" - %} -{:start="4"} -1. To apply your changes, select **Register**. The application is now registered in Azure AD. - - -### Configure permissions for Codefresh - -After registering Codefresh, configure the permissions. - -1. Select the application name to open **Settings**. -1. Select **API permissions**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/5-api-permissions.png" - url="/images/administration/sso/azure/5-api-permissions.png" - alt="Azure App API Permissions" - caption="Azure App API Permissions" - max-width="30%" - %} -{:start="3"} -1. To change access levels, select **Add a permission**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/6-request-api-permissions.png" - url="/images/administration/sso/azure/6-request-api-permissions.png" - alt="Azure App Change Permissions" - caption="Azure App Change Permissions" - max-width="30%" - %} -{:start="4"} -1. Find and select **Azure Active Directory Graph**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/7-request-api-permissions.png" - url="/images/administration/sso/azure/7-request-api-permissions.png" - alt="Azure Active Directory Graph entry" - caption="Azure Active Directory Graph entry" - max-width="30%" - %} -{:start="5"} -1. Select **Application permissions**, and select the following permissions: - * `Directory.Read.All` - * `Group.Read.All` - * `User.Read.All` - - >Note: - User.Read for the type of delegated is required. This permission is usually added by default. - -{:start="6"} -1. Select **Apply Permissions**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/ApplicationPermissions.png" - url="/images/administration/sso/azure/ApplicationPermissions.png" - alt="API Permissions" - caption="API Permissions" - max-width="30%" - %} - -{:start="7"} -1. From the bar on the top, select **Grant admin consent**. - -### Create Client Secret - -1. From the sidebar, select **Certificates & secrets**. 
- - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/9-Create-secret-page.png" - url="/images/administration/sso/azure/9-Create-secret-page.png" - alt="Change keys" - caption="Change keys" - max-width="30%" - %} -{:start="2"} -1. Select **New Client secret**, and add a description (arbitrary name). - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/10-Add-client-secret.png" - url="/images/administration/sso/azure/10-Add-client-secret.png" - alt="Add a client secret" - caption="Add a client secret" - max-width="30%" - %} -{:start="3"} -1. Select the desired duration. - >**Important:** If you select a key with an expiration date, record the expiration date in your calendar. Remember to renew the key before the expiration date to ensure that users don't experience a service interruption. -1. To display the key, select **Add**. -1. Copy the value of the key as you will need this when you configure the SSO settings for Azure in Codefresh. - -### Configure SSO for Azure in Codefresh - -1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. -1. Select **+ Add Single Sign-On**, select **Azure**, and then select **Next**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/sso-csdp-azure.png" - url="/images/administration/sso/azure/sso-csdp-azure.png" - alt="SSO settings for Azure in Codefresh" - caption="SSO settings for Azure in Codefresh" - max-width="30%" - %} - - * **Client Name**: For auto-generation, leave empty. Codefresh generates the client name once you save the settings. - * **Display Name**: Meaningful name that identifies the SSO provider. - * **Application ID**: The Application ID in Azure - * **Client secret**: The key value you copied when you created the client secret in Azure - * **Tenant**: `.onmicrosoft.com` - * **Object ID**: Your Azure Service Principal Object ID (from Enterprise Application configuration) - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/client-name.png" - url="/images/administration/sso/azure/client-name.png" - alt="SSO Client Name" - caption="SSO Client Name" - max-width="50%" - %} - You need this value when you configure the reply URL in the Azure portal. - -### Configure reply URLs -This is the final step in SSO setup for Azure. Add the Codefresh callback URL to the allowed reply URLs for the created application in Azure AD. -1. Go to **Azure Active Directory > Apps registrations**, and select your app. -1. Select **Add a Redirect URI**, and define: - - ``` - https://g.codefresh.io/api/auth//callback - - ``` - - where: `` is the Client Name in the SSO configuration, either defined by you or created by Codefresh. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/12-set-reply-URL.png" - url="/images/administration/sso/azure/12-set-reply-URL.png" - alt="Reply URLs" - caption="Reply URLs" - max-width="30%" - %} -{:start="3"} -1. On the same page, scroll down and select **ID tokens**. - - {% include image.html - lightbox="true" - file="/images/administration/sso/azure/13-Enable-ID-Tokens.png" - url="/images/administration/sso/azure/13-Enable-ID-Tokens.png" - alt="Reply URLs" - caption="Reply URLs" - max-width="30%" - %} - -You have now completed the SSO setup for Azure. 
- -##### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. diff --git a/_docs/administration/single-sign-on/sso-setup-oauth2.md b/_docs/administration/single-sign-on/sso-setup-oauth2.md deleted file mode 100644 index 93d4f5fc..00000000 --- a/_docs/administration/single-sign-on/sso-setup-oauth2.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: "Setting Up OpenID Connect Federated Single Sign-On (SSO)" -description: "" -group: administration -sub_group: single-sign-on -redirect_from: - - /docs/sso/sso-setup-oauth2/ - - /docs/enterprise/single-sign-on/sso-setup-oauth2/ -toc: true ---- - -Codefresh natively supports login using GitHub, Bitbucket and GitLab using the OpenID Connect (OAUTH 2.0) protocol. You can add new SSO integrations based on OAUTH 2.0 as part of the Codefresh Enterprise plan. - - -### Prerequisites - -To successfully add an identity provider in Codefresh, you must configure settings both for the identity provider and in Codefresh. -You need to: - -1. Configure your identity provider to provide SSO services to Codefresh. The configuration differs per identity provider. -1. Set up Codefresh to point to your identity provider, common for all identity providers. - -> SSO is only available to Enterprise customers. Please [contact sales](https://codefresh.io/contact-sales/) in order to enable it for your Codefresh account. - -### SSO configuration using OAuth2 - -SSO configuration in Codefresh is similar regardless of the identity provider selected. These settings are common to all providers: - -* **Display Name**: The name of your identity provider -* **Client ID**: The ID used for the connection -* **Client Secret**: The secret associated with the ID - -For detailed information on how to configure SSO for your identity provider, see the following: - -[Azure]({{site.baseurl}}/docs/administration/single-sign-on/sso-azure/) -[Google]({{site.baseurl}}/docs/administration/single-sign-on/sso-google/) -[Okta]({{site.baseurl}}/docs/administration/single-sign-on/sso-okta/) -[OneLogin]({{site.baseurl}}/docs/administration/single-sign-on/sso-onelogin/). - - -### Test SSO with your identity provider - -Once you configure SSO for your identity provider, do the following: -1. On the sidebar, below **User Management**, select **People**. -1. Add an active user for testing purposes. We recommend you use your own user. -1. Change Login method by selecting your Auth provider in the SSO drop-down. - - {% include image.html -lightbox="true" -file="/images/administration/sso/collaborators.png" -url="/images/administration/sso/collaborators.png" -alt="Adding collaborators" -caption="Adding collaborators" -max-width="30%" -%} - -{:start="3"} -1. Keep the current browser session open, and log in via Corporate SSO in an incognito tab (or another browser). - - {% include image.html -lightbox="true" -file="/images/administration/sso/sign-with-sso.png" -url="/images/administration/sso/sign-with-sso.png" -alt="Sign-in with SSO" -caption="Sign-in with SSO" -max-width="50%" -%} - -{:start="4"} -1. If everything works as expected, add more users. - ->Before enabling SSO for all users, you **MUST** make sure that it works for the test user. Once SSO is enabled for a user, Codefresh blocks logins through other IDPs for this user, and only allows login through the enabled SSO. 
If the selected SSO method does not work for some reason, the user is locked out of Codefresh. - - -## Select SSO method for collaborators - -To add users and select their SSO method, from the sidebar, select **Collaborators**. Then add the user's email or Codefresh username. -In addition to their role, you can now select the SSO method to use: - - {% include image.html -lightbox="true" -file="/images/administration/sso/select-user-sso.png" -url="/images/administration/sso/select-user-sso.png" -alt="Selecting SSO method" -caption="Selecting SSO method" -max-width="50%" -%} - -**SSO login for new and existing users** -If you have multiple SSO providers configured, you can select a different provider for each user if so required. - -* New users - If you have an SSO provider selected as the default, that provider is automatically assigned to new users, added either manually or via team synchronization. - -* Existing users - SSO login is not configured by default for existing users. You must _explicitly select_ the SSO provider for existing users. - If SSO login is already configured for an existing user, and you add a new identity provider, to change the SSO login to the new provider, you must _select_ the new provider for the user. - - -### Define a default identity provider - -If you have multiple identity providers for SSO, you can define one of them as your default provider. -When you define a default provider: -* The SSO method is automatically selected for all newly invited users -* All new users receive an email with an invite link that points directly to the login page of that SSO provider - - -1. Mouse over the top-right of the SSO screen - - {% include image.html -lightbox="true" -file="/images/administration/sso/default-sso.png" -url="/images/administration/sso/default-sso.png" -alt="Default SSO provider" -caption="Default SSO provider" -max-width="90%" -%} - -### Sync teams after initial SSO setup - -Once the initial setup is done, you can also sync your teams between Codefresh and the identity provider. -You can do this via the [Codefresh Cli](https://codefresh-io.github.io/cli/), using the [sync command](https://codefresh-io.github.io/cli/teams/synchronize-teams/). - -For example, to sync you azure teams you can execute: - -``` -codefresh synchronize teams my-client-name -t azure - -``` - -You can find the client-name from the SSO UI. - -{% include image.html -lightbox="true" -file="/images/administration/sso/azure/client-name.png" -url="/images/administration/sso/azure/client-name.png" -alt="SSO Client Name" -caption="SSO Client Name" -max-width="40%" -%} - -Even though you can run this command manually, it makes more sense to run it periodically as a job. And the obvious -way to perform this is with a Codefresh pipeline. The CLI can be used as a [freestyle step]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/). - -You can create a git repository with a [codefresh.yml]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) file with the following contents: - -`YAML` -{% highlight yaml %} -{% raw %} -version: '1.0' -steps: - syncMyTeams: - title: syncTeams - image: codefresh/cli - commands: - - 'codefresh synchronize teams my-client-name -t azure' -{% endraw %} -{% endhighlight %} - -To fully automate this pipeline, set a [cron trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/cron-triggers/) for this pipeline. The cron-trigger is responsible for running this pipeline, (and therefore synchronizing the teams), in a fully automated manner. 
-This way you can synchronize your teams every day/week/hour depending on you cron trigger setup. - diff --git a/_docs/administration/single-sign-on/sso-setup-saml2.md b/_docs/administration/single-sign-on/sso-setup-saml2.md deleted file mode 100644 index 3e674085..00000000 --- a/_docs/administration/single-sign-on/sso-setup-saml2.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: "Setting Up SAML2 Federated Single Sign-On (SSO)" -description: "" -group: administration -sub_group: single-sign-on -redirect_from: - - /docs/sso/sso-setup-saml2/ - - /docs/enterprise/single-sign-on/sso-setup-saml2/ -toc: true ---- - -Codefresh natively supports login using GitHub, Bitbucket and GitLab using the OpenID Connect (OAUTH 2.0) protocol. You can add new SSO integrations based on OAUTH 2.0 as part of Codefresh Enterprise plan. - -As Identity Providers (IdPs) come in all shapes and sizes, the following topic discusses in general what you must do to configure Federated SSO. - As you will see in the description below, the person in your organization responsible for managing your IdP will need to interact with Codefresh support team to successfully set up a trust between your IdP and Codefresh SP. - -{:.text-secondary} -### Before you set up Federated SSO - 1. Have your account set up with Codefresh enterprise plan. - 2. Ensure you have a working SAML 2.0 compliant identity provider (IdP). - 3. Identify someone in your organization who is familiar with configuring and managing your organization's IdP. - 4. Ensure that your IdP's system clock is synchronized with a reliable time source. If it's not, tokens generated will be unusable and SSO will fail. - -{:.text-secondary} -### Summary of Federated SSO setup - -{% include image.html - lightbox="true" - file="/images/sso-flow.png" - url="/images/sso-flow.png" - alt="sso-flow.png" - max-width="100%" -%} - -{:.text-secondary} -### SAML attributes - -Codefresh expects the following user attributes to be passed through SAML between your IdP and Codefresh SP: - - User email address - - User first name - - User last name - - User full name - - User unique ID that isn't subject to change in your identity management environment - -{:.text-secondary} -## How does the connection process work? - - {% include image.html -lightbox="true" -file="/images/sso-diagram.png" -url="/images/sso-diagram.png" -alt="sso-diagram.png" -max-width="100%" - %} - -Once Federated SSO has been configured, the process works as follows: - -
- - Steps 2 to 7 occur in the background and are transparent to the user. -
- -1. A user logs in to CDSP -2. The user is redirected to Codefresh Service Provider (SP) to initiate SSO -3. The user’s browser is then redirected to the customer IdP -4. Once authenticated by the corporate side, a SAML token is sent to the user’s browser -5. The SAML assertion is then forwarded to Codefresh SP -6. If you are a valid Codefresh user for this SSO connection, an SSO token is returned to the user’s browser -7. The user’s browser then returns a token to Codefresh and access is granted for your account - -### Configure SAML SSO settings in Codefresh - -1. In Codefresh, select **Account settings**. -1. From the sidebar expand **Collaboration**, and select **Single Sign-on**. - OR - Go directly to [https://g.codefresh.io/account-admin/sso](https://g.codefresh.io/account-admin/sso)) - - - {% include image.html - lightbox="true" -file="/images/administration/sso/add-sso-dropdown.png" -url="/images/administration/sso/add-sso-dropdown.png" -alt="SSO provider settings" -caption="SSO provider settings" -max-width="70%" -%} - -{:start="3"} -1. Select **Add single-sign-on**, and then select **SAML**. -1. Enter the following: - - * **Client Name**: For auto-generation, leave empty. Codefresh generates the client name once you save the settings. - * **Display Name**: The name you want to give to this integration. - * **IDP Entry**: The SSO endpoint of your Identity Provider. For Azure SAML, for example, this is the Login URL. - * **Application Certificate**: The security certificate of your Identity Provider. Paste the value directly in the field. Do not convert to base64 or any other encoding by hand. (For Azure SAML, this will be Certificate (Base64) and the value needed is between the -----BEGIN ... and -----END... from the downloaded cert) - * **Assertion URL**: `https://g.codefresh.io/api/auth//callback​` - where ​​ is he client name that is automatically generated when saving the SSO settings. - * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. -1. Select **Save**, and note down the `Client Name` that is generated. - - -### Configure IdP settings for Codefresh as a Service Provider -In the settings of your Identity Provider, create a new Service Provider and provide the following: - - * **Service Provider SSO Endpoint**: Assertion consumer service URL - `https://g.codefresh.io/api/auth//callback` - * **Service Provider Entity ID**: `g.codefresh.io` - -The mandatory fields needed for SAML assertions are: -1. firstName: User's first name -1. lastName: User's last name -1. email: User's email - -To configure users sync for SAML IDP, do the following: - -1. Select a G Suite provider -1. Enable Auto Sync users and teams to Codefresh -1. Set JSON Keyfile, Admin Email and Sync interval - -The instructions for getting the JSON Keyfile, and Admin Email are the same as for [Google SSO]({{site.baseurl}}/docs/administration/single-sign-on/sso-google/#synchronize-teams-with-the-codefresh-cli). - ->Note - These settings are for the SaaS version of Codefresh. For an on-premises setup, use the URLs that match your installation. - -Once everything is finished, you [should test the integration]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider). 
Once it's working, proceed to the next steps that are: - -* [Selecting SSO method for collaborators]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#selecting-sso-method-for-collaborators) - ->Notice that Codefresh has an internal cache for SSO configurations and it might take up to five minutes for your changes to take effect. - -## OneLogin SAML Setup - -1. In OneLogin, go to the [Applications](https://cfsupport.onelogin.com/apps) Section. -1. Select 'Add App' on the top right. -1. Search for 'SAML Custom Connector' (advanced) and select it. -1. Add a Display Name (the rest is optional) and Save. -1. View the SSO Section. -1. Open a New Tab and go to the [Single Sign-On](https://g.codefresh.io/account-admin/sso) settings in Codefresh. -1. In Codefresh, select SAML for the Add Single Sign-On. - * Display Name = any arbitrary name you want to give in this integration. - * IDP Entry = SAML 2.0 Endpoint (HTTP) from the SSO section in OneLogin. - * Application Certificate = X.509 Certificate from the SSO section in OneLogin. - * Click View Details (preferable open in a new tab). - * Under X.509 Certificate, click the copy button. - * Paste the contents into the Application Certificate. - * Remove the -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----. - * Save. -1. Click edit on the SAML integration we created. - * Copy the Assertion URL -1. In OneLogin, view the Configuration section. - * Audience (EntityID) = g.codefresh.io - * Recipient = Assertion URL - * ACS (Consumer) URL Validator= Assertion URL but in Regex form. View OneLogin's [Setup Page](https://onelogin.service-now.com/support?id=kb_article&sys_id=c89fefdadb2310503de43e043996195a&kb_category=93e869b0db185340d5505eea4b961934) for more info. - * ACS (Consumer) URL = Assertion URL - * Login URL = https://g.codefresh.io/login - * SAML Initiator = Service Provider - * Save -1. In OneLogin, Go the [Users](https://cfsupport.onelogin.com/users) page. - * Select the User - * Go to Applications Section - * Click the **+** to add - * Select the SAML App (will show the Display Name from step 7) - * Click Continue - * Make sure NameID value = email address - * Save - -> Once the configuration is complete, please test the integration before enabling the SSO for all users. - - - - - - diff --git a/_docs/administration/user-settings.md b/_docs/administration/user-self-management/manage-pats.md similarity index 57% rename from _docs/administration/user-settings.md rename to _docs/administration/user-self-management/manage-pats.md index c8f130c5..d4fb426c 100644 --- a/_docs/administration/user-settings.md +++ b/_docs/administration/user-self-management/manage-pats.md @@ -1,57 +1,25 @@ --- -title: "User settings" +title: "Managing Git PATs" description: "" group: administration +sub_group: user-self-management toc: true --- -As a user in Codefresh, you can manage your account by authorizing access to your Git provider accounts, and optionally, enabling access for Codefresh support. +As a user in Codefresh, you must authorize access to your Git provider accounts, and authenticate Git-based actions from Codefresh clients, per provisioned runtime. +The authorization method depends on the Git provider and on what authorization has been set up by your account admin. +* If your admin has set up authentication with OAuth2, you can authorize access using OAuth2. +* You can always generate a personal access token from your Git provider and then add the same to Codefresh to authorize access. 
-* Enable access for Codefresh support - Optional. Enable access to your account for troubleshooting purposes. - -* Authorize Git providers - The Git personal token is a user-specific access token, required to authenticate Git-based actions from Codefresh clients, per provisioned runtime. - - - The authorization method depends on the Git provider and on what authorization has been set up by your account admin. - - - If your admin has set up authentication with OAuth2, you can authorize access using OAuth2. - Or, you can always generate a personal access token from your Git provider and then add the same to Codefresh to authorize access. - - > If you have access to more than one runtime, you can use the same token for multiple runtimes. - You must however authorize access individually for each runtime. +> If you have access to more than one runtime, you can use the same token for multiple runtimes. + You must however authorize access individually for each runtime. {::nomarkdown}
{:/} -### Enable access for Codefresh support -Enable Codefresh support personnel to access your user account. Access to your account is useful for visibility during troubleshooting. - -You can disable this security setting at any time. - -> Codefresh personnel takes action only after confirmation from you, and all actions are audited. - -1. In the CSDP UI, go to [User Settings](https://g.codefresh.io/2.0/user-settings){:target="\_blank"}. -1. Enable **Allow Codefresh support tem to log in...**. -{% include - image.html - lightbox="true" - file="/images/administration/user-settings/security-enable-support-access.png" - url="/images/administration/user-settings/security-enable-support-access.png" - alt="Enable access for Codefresh support" - caption="Enable access for Codefresh support" - max-width="50%" -%} - -{::nomarkdown} -
-{:/} - -### Authorize Git access in Codefresh +## Authorize Git access in Codefresh Authorize Git access with OAuth2 if your account admin has set up Codefresh as an OAuth application, or alternatively through personal access tokens from your Git provider. >Notes: For OAuth2: The adminstrator pre-configures the permissions and expiry date. Once you supply your credentials for authorization, you are automatically directed to the Git Personal Tokens page. @@ -67,7 +35,7 @@ Make sure you have: **How to** -1. In the Codefresh UI, go to [User Settings](https://g.codefresh.io/2.0/user-settings){:target="\_blank"}. +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Git Personal Access Token** (TBD(https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}). 1. Select the runtime, and then do one of the following: * To add a token, select **Add Token**. * To update an existing token by replacing it with a new token, select **Update Token**. @@ -87,7 +55,7 @@ Make sure you have: -{:start="5"} +{:start="4"} 1. Click **Add Token**. In the Git Personal Access Tokens list, you can see that the new token is assigned to the runtime. @@ -95,7 +63,7 @@ Make sure you have:
{:/} -#### Generate GitHub personal access tokens +### Generate GitHub personal access tokens 1. Log in to your GitHub or GitHub Enterprise account. 1. Select **Settings > Developer Settings > Personal Access Tokens > Tokens (classic)**. @@ -107,8 +75,8 @@ Make sure you have: {% include image.html lightbox="true" - file="/images/administration/user-settings/github-pat-scopes.png" - url="/images/administration/user-settings/github-pat-scopes.png" + file="/images/administration/manage-pats/github-pat-scopes.png" + url="/images/administration/manage-pats/github-pat-scopes.png" alt="GitHub personal access token scopes" caption="GitHub personal access token scopes" max-width="50%" @@ -121,7 +89,7 @@ Make sure you have:
{:/} -#### Generate GitLab personal access tokens +### Generate GitLab personal access tokens 1. Log in to your GitLab Cloud or Server account. 1. Select **User settings > Access tokens**. @@ -133,8 +101,8 @@ Make sure you have: {% include image.html lightbox="true" - file="/images/administration/user-settings/gitlab-pat-scopes.png" - url="/images/administration/user-settings/gitlab-pat-scopes.png" + file="/images/administration/manage-pats/gitlab-pat-scopes.png" + url="/images/administration/manage-pats/gitlab-pat-scopes.png" alt="GitLab personal access token scopes" caption="GitLab personal access token scopes" max-width="50%" @@ -150,7 +118,7 @@ Make sure you have:
{:/} -#### Generate Bitbucket personal access tokens +### Generate Bitbucket personal access tokens 1. Log in to your Bitbucket Cloud or Server account. @@ -164,8 +132,8 @@ Make sure you have: {% include image.html lightbox="true" - file="/images/administration/user-settings/bitbucket-pat-scopes.png" - url="/images/administration/user-settings/bitbucket-pat-scopes.png" + file="/images/administration/manage-pats/bitbucket-pat-scopes.png" + url="/images/administration/manage-pats/bitbucket-pat-scopes.png" alt="Bitbucket personal access token scopes" caption="Bitbucket personal access token scopes" max-width="50%" @@ -178,5 +146,5 @@ Make sure you have:
{:/} -### Related articles +## Related articles [Git tokens in Codefresh]({{site.baseurl}}/docs/reference/git-tokens/) \ No newline at end of file diff --git a/_docs/administration/user-self-management/user-settings.md b/_docs/administration/user-self-management/user-settings.md new file mode 100644 index 00000000..47743fb4 --- /dev/null +++ b/_docs/administration/user-self-management/user-settings.md @@ -0,0 +1,109 @@ +--- +title: "Manage personal user settings" +description: "Manage your personal settings" +group: administration +sub_group: user-self-management +toc: true +--- + +As a Codefresh user, you can manage several settings in your personal account, including: + +* Email notifications for builds and build usage +* Grant account access to Codefresh support +* Grant access to private Git repositories +* Create and manage API keys + +> To manage Git personal access tokens for GitOps, see [Managing PATs]({{site.baseurl}}/docs/administration/user-self-management/manage-pats). + +## Access user settings +* In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **User Settings** (https://g.codefresh.io/user/settings){:target="\_blank"}. + +## Email notifications for pipeline builds + +Configure the email notifications you want to receive for builds based on the build status: only successful, only failed, or for both successful and failed builds. + +> By default, email notifications for builds are disabled for _all users_. + +* In **Notifications**, define the email address and select the notifications: + * Email address for the notifications. By default, it's the same address you used to [sign up]({{site.baseurl}}/docs/administration/account-user-management/create-a-codefresh-account/). +* Select the build statuses for which to receive notifications. + + + +{% include image.html +lightbox="true" +file="/images/administration/user-settings/notifications.png" +url="/images/administration/user-settings/notifications.png" +alt="Email notifications for pipeline builds" +caption="Email notifications for pipeline builds" +max-width="50%" +%} + + + +## Weekly updates of build usage + +Select to receive weekly summaries of builds across your pipelines along with other statistical data. This information can be useful if you want to understand your overall project build health and capacity usage. + +* In **Updates**, select or clear **Receive updates...**. + + +## Enable access for Codefresh support + +Enable Codefresh support personnel to access your user account. Access to your account is useful for visibility during troubleshooting. If you have an issue with the Codefresh platform, our support personnel can log into your account and look at running builds, inspect Docker images, run pipelines for you etc. + +You can disable this security setting at any time. + +>Codefresh personnel takes action only after confirmation from you, and all actions are audited. + +* In **Security**, select **Allow Codefresh support team to log in…**.. + + +{% include image.html +lightbox="true" +file="/images/administration/user-settings/allow-support-access.png" +url="/images/administration/user-settings/allow-support-access.png" +alt="Allow access to Codefresh support" +caption="Allow access to Codefresh support" +max-width="100%" +%} + + + + +## Create and manage API keys + +Generate new API keys to access Codefresh functionality from your scripts or applications, outside the Codefresh UI. Edit scopes for existing keys, or revoke them when needed. 
+For details, see [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions). + +>Tokens are visible only during creation. You cannot "view" an existing token. To re-enable API access for an existing application, you must delete the old token and create a new one. + + +1. In **API Keys**, to generate a new API key, click **Generate**. +1. Select the scopes for the key. + + +{% include image.html +lightbox="true" +file="/images/integrations/api/generate-token.png" +url="/images/integrations/api/generate-token.png" +alt="Generating a key for the API" +caption="Generating a key for the API" +max-width="80%" +%} + + +## Related articles + + +[Manage Git PATs]({{site.baseurl}}/docs/administration/manage-pats) +[Single Sign on]({{site.baseurl}}/docs/administration/single-sign-on/) + + diff --git a/_docs/ci-cd-guides/first-pipeline.md b/_docs/ci-cd-guides/first-pipeline.md deleted file mode 100644 index 83c252ef..00000000 --- a/_docs/ci-cd-guides/first-pipeline.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Create your first pipeline" -description: "" -group: ci-cd-guides -toc: true ---- - -Coming soon diff --git a/_docs/ci-cd-guides/gitops-deployments.md b/_docs/ci-cd-guides/gitops-deployments.md new file mode 100644 index 00000000..614f8b3f --- /dev/null +++ b/_docs/ci-cd-guides/gitops-deployments.md @@ -0,0 +1,688 @@ +--- +title: "GitOps deployments" +description: "Deploy with Codefresh and ArgoCD" +group: ci-cd-guides +toc: true +--- + +Apart from traditional push-based Helm deployments, Codefresh can also be used for [GitOps deployments](https://codefresh.io/gitops/). + +## What is GitOps + +GitOps is the practice of performing Operations via Git only. The main principles of GitOps are the following: + +* The state of the system/application is always stored in Git. +* Git is always the source of truth for what happens in the system. +* If you want to change the state of the system you need to perform a Git operation such as creating a commit or opening a pull request. Deployments, tests, and rollbacks controlled through git flow. +* Once the Git state is changed, then the cluster (or whatever your deployment target is) state should match what is described in the Git repository. +* No hand rolled deployments, no ad-hoc cluster changes, no live configuration changes are allowed. If a change needs to happen, it must be committed to Git first. + +GitOps deployments have several advantages compared to traditional imperative deployments. The main one is that the Git repo represents the state of the system, and Git history +is essentially the same thing as deployment history. Rollbacks are very easy to perform by simply using a previous Git hash. + +Even though GitOps is not specific to Kubernetes, current GitOps tools work great with Kubernetes in the form of cluster controllers. The GitOps controller monitors the state of the Git repository and when a commit happens, the cluster is instructed to match the same state. + +Codefresh has native support for GitOps including a graphical dashboard for handling your GitOps deployments: + +{% include image.html + lightbox="true" + file="/images/guides/gitops/gitops-dashboard.png" + url="/images/guides/gitops/gitops-dashboard.png" + alt="The GitOps dashboard" + caption="The GitOps dashboard" + max-width="100%" + %} + +This guide will explain how you can use GitOps for your own applications. + +## Setting up your Git Repositories + +One of the central ideas around GitOps is the usage of Git for ALL project resources. 
Even though developers are familiar with using Git for the source code of the application, adopting GitOps means that you need to store in Git every other resource of the application (and not just the source code).
+
+In the case of Kubernetes, this means that all Kubernetes manifests should be stored in a Git repository as well. In the simplest scenario, you have the main repository of your application (this is mostly interesting to developers) and [a second Git repository with Kubernetes manifests](https://argoproj.github.io/argo-cd/user-guide/best_practices/#separating-config-vs-source-code-repositories) (this is more relevant to operators/SREs).
+
+As a running example you can use:
+
+* The [https://github.com/codefresh-contrib/gitops-app-source-code](https://github.com/codefresh-contrib/gitops-app-source-code) repository for the application code
+* The [https://github.com/codefresh-contrib/gitops-kubernetes-configuration](https://github.com/codefresh-contrib/gitops-kubernetes-configuration) repository for the Kubernetes configuration
+* The [https://github.com/codefresh-contrib/gitops-pipelines](https://github.com/codefresh-contrib/gitops-pipelines) repository that holds the pipelines
+
+The application code repository contains the source code plus a Dockerfile. You can use any Git workflow for this repository. We will set up a pipeline in Codefresh that creates a container image on each commit.
+
+The configuration repository holds the Kubernetes manifests. This is one of the critical points of GitOps:
+
+* The configuration repository holds the manifests that are also present in the Kubernetes cluster
+* Every time a commit happens to the configuration repository, the cluster is notified to deploy the new version of the files (we will set up a pipeline for this)
+* Every subsequent configuration change should become a Git commit. Ad-hoc changes to the cluster (e.g., with `kubectl` commands) are **NOT** allowed
+
+We also have a third Git repository for pipelines, because pipelines are also part of the application.
+
+Before continuing, fork all three repositories into your own GitHub account if you don't already have your own example application.
+
+## Connecting ArgoCD and Codefresh
+
+GitOps deployments are powered by [ArgoCD](https://argoproj.github.io/argo-cd/), so you need an active ArgoCD installation in your cluster to take advantage of the GitOps dashboard in Codefresh.
+
+Follow the instructions for [connecting ArgoCD to Codefresh]({{site.baseurl}}/docs/integrations/argocd/) and creating an ArgoCD application.
+
+{% include image.html
+  lightbox="true"
+  file="/images/integrations/argocd/argocd-provision-app.png"
+  url="/images/integrations/argocd/argocd-provision-app.png"
+  alt="Creating a new ArgoCD application in a Codefresh environment"
+  caption="Creating a new ArgoCD application in a Codefresh environment"
+  max-width="40%"
+  %}
+
+The options are:
+
+* Name - User-defined name of the Codefresh environment dashboard
+* Project - A way to [group/secure applications](https://argoproj.github.io/argo-cd/user-guide/projects/). Choose default if you have only one project in ArgoCD.
+* Application - Name of the application
+* Manual/automatic sync - If automatic, a deployment automatically takes place whenever a Git commit happens.
+* Use schema - Kubernetes manifests will be checked for correctness before being deployed to the cluster
+* Source repository - Git repository that holds your Kubernetes manifests
+* Revision - Revision to be checked out when a deployment happens
+* Path - Folder inside the Git repository that should be searched for manifests (if your Git repo has multiple applications). Use `./` if all your manifests are in the root folder.
+* Cluster - Kubernetes cluster where the deployment will take place
+* Namespace - Kubernetes namespace to which the application will be deployed
+* Directory recurse - Whether to check all folders in the Git repository for manifests recursively
+
+For a sample application you can use the [https://github.com/codefresh-contrib/gitops-kubernetes-configuration](https://github.com/codefresh-contrib/gitops-kubernetes-configuration) repository. Fork the project into your own GitHub account and use that link in the *Source repository* section.
+
+Once you connect your application, you will see it in the GitOps application screen in the Codefresh UI.
+
+## Creating a basic CI Pipeline for GitOps
+
+Creating a CI pipeline for GitOps is no different from a [standard pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/) that [packages your Docker images]({{site.baseurl}}/docs/ci-cd-guides/building-docker-images/), runs [tests]({{site.baseurl}}/docs/testing/unit-tests/), performs [security scans]({{site.baseurl}}/docs/testing/security-scanning/), etc.
+
+ {% include image.html
+  lightbox="true"
+  file="/images/guides/gitops/basic-ci-pipeline.png"
+  url="/images/guides/gitops/basic-ci-pipeline.png"
+  alt="Basic CI pipeline"
+  caption="Basic CI pipeline"
+  max-width="100%"
+  %}
+
+To take advantage of the GitOps dashboard facilities, you also need to set up the correlation between the Docker image and the Pull Requests/issues associated with it. This correlation happens via [annotations]({{site.baseurl}}/docs/codefresh-yaml/annotations/). The easiest way to annotate your image is by using the [pipeline plugins](https://codefresh.io/steps/) offered by Codefresh for this purpose.
Currently we offer the following plugins: + +* [Record Pull Request information](https://codefresh.io/steps/step/image-enricher) +* [Record Jira Issue information](https://codefresh.io/steps/step/jira-issue-extractor) + +Here is an example Pipeline definition: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "metadata" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "my-github-username/gitops-app-source-code" + revision: '${{CF_REVISION}}' + stage: "clone" + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/simple-web-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + enrich-image: + title: Add PR info + type: image-enricher + stage: "metadata" + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:latest + BRANCH: '${{CF_BRANCH}}' + REPO: 'kostis-codefresh/simple-web-app' + GIT_PROVIDER_NAME: github-1 + jira-issue-extractor: + title: Enrich image with jira issues + type: jira-issue-extractor + stage: "metadata" + fail_fast: false + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:latest + JIRA_PROJECT_PREFIX: 'SAAS' + MESSAGE: SAAS-8431 + JIRA_HOST: codefresh-io.atlassian.net + JIRA_EMAIL: kostis@codefresh.io + JIRA_API_TOKEN: '${{JIRA_TOKEN}}' +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. Checks out the source code of an application with the [git-clone step]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) +1. [Builds]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) a docker image +1. Annotates the Docker image with the Pull Request information provided by Github +1. Annotates the Docker image with a specific Jira issue ticket + +You can see the associated metadata in your [Docker image dashboard](https://g.codefresh.io/images/) + + {% include image.html + lightbox="true" + file="/images/guides/gitops/image-annotations.png" + url="/images/guides/gitops/image-annotations.png" + alt="Enriched Docker image" + caption="Enriched Docker image" + max-width="80%" + %} + +Codefresh is using this information to fill the deployment history in the GitOps dashboard. + +## Creating a basic CD Pipeline for GitOps + +To create a CD pipeline in Codefresh that is responsible for GitOps deployments you must first disable the auto-sync behavior of ArgoCD. You can disable auto-sync either from the GUI or via the [command line](https://argoproj.github.io/argo-cd/user-guide/auto_sync/): + + {% include image.html + lightbox="true" + file="/images/guides/gitops/disable-auto-sync.png" + url="/images/guides/gitops/disable-auto-sync.png" + alt="Basic CD pipeline" + caption="Basic CD pipeline" + max-width="80%" + %} + + With the auto-sync behavior disabled, all Git pushes that happen on the GitOps repo will be ignored by ArgoCD (however ArgoCD will still mark your application as out-of-sync). + + You can now [create a new pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/) in Codefresh using a [standard Git trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) that will monitor the GitOps repository for updates. This way Codefresh is responsible for the GitOps process instead of Argo. 
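+If you prefer the command line to the GUI for the auto-sync step above, a sketch with the Argo CD CLI would be the following (the application name is a placeholder):
+
+```
+# Turn off automated sync so that the Codefresh pipeline controls when commits are deployed.
+argocd app set my-gitops-app --sync-policy none
+```
+
+After this, only an explicit sync, such as the one triggered by the CD pipeline described below, deploys new commits.
+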
+
+ {% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/argo-sync-pipeline.png"
+ url="/images/guides/gitops/argo-sync-pipeline.png"
+ alt="Basic CD pipeline"
+ caption="Basic CD pipeline"
+ max-width="80%"
+ %}
+
+The big advantage here is that you can construct a full pipeline over the sync process, with multiple steps before or after the sync. For example, you could run some smoke tests after the deployment takes place. Here is an example pipeline:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+stages:
+  - "pre sync"
+  - "sync app"
+  - "post sync"
+
+steps:
+  pre_sync:
+    title: "Pre sync commands"
+    type: "freestyle" # Run any command
+    image: "alpine:3.9" # The image in which the commands will be executed
+    commands:
+      - echo "Sending a metrics marker"
+    stage: "pre sync"
+  sync_and_wait:
+    title: Sync ArgoCD app and wait
+    type: argocd-sync
+    arguments:
+      context: "argo-cd"
+      app_name: "${{ARGOCD_APP_NAME}}"
+      wait_healthy: true
+    stage: "sync app"
+  post_sync:
+    title: "Post sync commands"
+    type: "freestyle" # Run any command
+    image: "alpine:3.9" # The image in which the commands will be executed
+    commands:
+      - echo "running smoke tests"
+    stage: "post sync"
+{% endraw %}
+{% endhighlight %}
+
+The pipeline uses the [argo-sync plugin](https://codefresh.io/steps/step/argocd-sync) to start the sync process of an application from the Git repo to the cluster.
+
+The name of the `context` parameter should be the same name you used for your [ArgoCD integration]({{site.baseurl}}/docs/integrations/argocd/).
+
+ {% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/argo-context.png"
+ url="/images/guides/gitops/argo-context.png"
+ alt="Using the Argo integration name as a context"
+ caption="Using the Argo integration name as a context"
+ max-width="80%"
+ %}
+
+The name of the application should be the same as the name of the ArgoCD Application.
+
+ {% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/argo-application-name.png"
+ url="/images/guides/gitops/argo-application-name.png"
+ alt="Argo Application name"
+ caption="Argo Application name"
+ max-width="80%"
+ %}
+
+ You can use pipeline variables or any other familiar Codefresh mechanism such as [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/).
+
+ Once the pipeline has finished running, the sync status is updated in your GitOps dashboard to reflect the current state.
+
+## Working with the GitOps Dashboard
+
+After you create an ArgoCD application, you can click on it in the [GitOps environment overview](https://g.codefresh.io/gitops) and see the respective GitOps screen.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/real-dashboard.png"
+ url="/images/guides/gitops/real-dashboard.png"
+ alt="GitOps Dashboard"
+ caption="GitOps Dashboard"
+ max-width="100%"
+ %}
+
+This dashboard is the central place for monitoring your application and contains the following information:
+
+1. Current health and sync status
+1. Deployment graph that shows successful/failed deployments in the selected time period
+1. Complete history of deployments according to Git hash. For each deployment you can also see which Pull Request was used for the commit, who the committer was, and which Jira issues the Pull Request resolves (provided that the image was built by a Codefresh pipeline)
+1. The Kubernetes services that belong to this application (on the Services tab)
+1. 
What services and replicas were updated with each deployment.
+
+The deployment status is fetched live from your ArgoCD integration. If, at any point, the deployment is not synced with Git, you will instantly see the out-of-sync status, along with the number of resources that are out of sync. Clicking the out-of-sync status shows a list of all resources in that state.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/out-of-sync.png"
+ url="/images/guides/gitops/out-of-sync.png"
+ alt="Out of sync status"
+ caption="Out of sync status"
+ max-width="60%"
+ %}
+
+For each Git hash, Codefresh associates the respective Pull Request and Jira issue(s) that affected the deployment. To achieve this correlation, Codefresh enriches the Docker image(s) of the service during the CI process.
+
+You can manually create these annotations with the [standard Codefresh annotation support]({{site.baseurl}}/docs/codefresh-yaml/annotations/) or via the built-in pipeline steps that we will see in the next section.
+
+You can find helpful tooltips if you hover over the PR number, the issue, the Git committer, and so on.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/tooltips.png"
+ url="/images/guides/gitops/tooltips.png"
+ alt="Extra tooltip information"
+ caption="Extra tooltip information"
+ max-width="80%"
+ %}
+
+For each deployment you can also see a before/after view of the pods/replicas that were affected.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/updated-services.png"
+ url="/images/guides/gitops/updated-services.png"
+ alt="Updated services"
+ caption="Updated services"
+ max-width="100%"
+ %}
+
+### Filtering the Deployment History
+
+You can add filters to the deployment history by using the multi-select field on the top left of the screen.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/filter.png"
+ url="/images/guides/gitops/filter.png"
+ alt="Filtering options"
+ caption="Filtering options"
+ max-width="40%"
+ %}
+
+ You can add filters for:
+
+* Git committer(s)
+* Pull Request number(s)
+* Jira issue(s)
+
+ If you define multiple options, they work in an OR manner.
+
+### Searching the Deployment History
+
+For advanced filtering options, the search field on the top right allows you to view only the subset of deployments that match your custom criteria.
+
+Apart from direct text search, the text field also supports a simple query language with the following keywords:
+
+* `issues`
+* `issue`
+* `prs`
+* `pr`
+* `committer`
+* `committers`
+* `service`
+* `services`
+* `image`
+* `images`
+* `status`
+* `statuses`
+
+The following characters serve as delimiters:
+
+* `:` defines the value for a keyword
+* `,` defines multiple values for a single keyword
+* `;` defines multiple criteria
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/search.png"
+ url="/images/guides/gitops/search.png"
+ alt="Searching deployment history"
+ caption="Searching deployment history"
+ max-width="80%"
+ %}
+
+Some examples are:
+
+* `pr:2` - filter the deployment history to show only a specific Pull Request
+* `issues: SAAS-2111, SAAS-2222` - show only specific issues
+* `issue: SAAS-2111; pr:3 ; service: my-app` - search for multiple criteria with OR behavior
+
+Using the search field allows you to quickly find a specific Git commit in the history of the application (and even roll back the deployment, as explained in the next sections). 
+
+## Current State of Application
+
+The Current State tab shows a hierarchical view of the cluster resources for your application.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/currentstate.png"
+ url="/images/guides/gitops/currentstate.png"
+ alt="Current State tab"
+ caption="Current State tab"
+ max-width="80%"
+ %}
+
+At the top of the screen you have several filters available:
+
+* Kind - choose a specific type of Kubernetes resource
+* Health - status of the resource
+* Sync state - GitOps status of the resource
+* Free search - search any resource by name
+
+## Tagging GitOps Applications
+
+1. Navigate to the GitOps dashboard.
+2. To the right of the application (next to the Health column), click the three dots to open the More Actions dropdown.
+3. Select Add/Edit Tags.
+4. Click +tags to add tags.
+5. To remove a tag, click the "x" next to it.
+6. Click Save.
+
+## Rolling Back Git Versions
+
+In the GitOps dashboard you will also see a complete history of all past deployments as recorded in Git. You can select any of the previous versions and roll back your application to the respective version.
+
+ {% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/rollback.png"
+ url="/images/guides/gitops/rollback.png"
+ alt="Rolling back to a previous version"
+ caption="Rolling back to a previous version"
+ max-width="80%"
+ %}
+
+The rollback simply informs the cluster to use a different Git hash for the sync process. It doesn't affect your Git repository, and ArgoCD will now show your application as out-of-sync (because the last Git commit no longer matches the status of the cluster).
+
+This rollback behavior is best used as an emergency measure after a failed deployment, where you want to bring the cluster back to a previous state in a temporary manner. If you wish to make the current rollback status permanent, it is best to use the standard `git reset/revert` commands and change the GitOps repository to its desired state.
+
+## GitOps ABAC Support for the Rollback Action
+
+1. Go to Account Settings > Permissions > Teams Tab > GitOps.
+2. Select the team.
+3. Choose what the team can do and click Apply.
+4. Select the tags of the applications and click Apply.
+5. Click Add Rule when done.
+
+## Performing Automatic Git Commits
+
+Usually, the Pull Requests that take part in a GitOps workflow are created and approved manually (after code review). However, you can fully automate the whole process: rather than opening a Pull Request on both the application repository and the manifest repository, you can automatically commit the manifest changes inside the pipeline that creates the artifact.
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/gitops-workflow.png"
+ url="/images/guides/gitops/gitops-workflow.png"
+ alt="Full GitOps workflow"
+ caption="Full GitOps workflow"
+ max-width="100%"
+ %}
+
+Here is an example pipeline that creates a Docker image and also commits a version change in the Kubernetes manifest to denote the new Docker tag of the application:
+
+{% include image.html
+ lightbox="true"
+ file="/images/guides/gitops/ci-cd-pipeline.png"
+ url="/images/guides/gitops/ci-cd-pipeline.png"
+ alt="Pipeline that commits manifests"
+ caption="Pipeline that commits manifests"
+ max-width="80%"
+ %}
+
+There are many ways to change a Kubernetes manifest programmatically; for brevity, we use the [yq](https://github.com/mikefarah/yq) command-line tool. 
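+
+If your configuration repository is organized as a Kustomize overlay instead of plain manifests, the same update could be performed with `kustomize edit set image`. The step below is only a sketch: the container image name matches the example application used in this guide, while the step image and overlay path are assumptions you would adapt to your own setup. The full pipeline that follows uses the yq approach.
+
+{% highlight yaml %}
+{% raw %}
+  change_manifest_kustomize:
+    title: "Update k8s manifest (Kustomize variant)"
+    image: "k8s.gcr.io/kustomize/kustomize:v3.8.7"  # assumed image that bundles the kustomize CLI
+    working_directory: "${{clone_gitops}}/overlays/production"  # hypothetical overlay path
+    commands:
+      # Pin the application image to the tag produced by the build step
+      - kustomize edit set image docker.io/kostiscodefresh/simple-web-app=docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}}
+      - cat kustomization.yaml
+    stage: "gitops"
+{% endraw %}
+{% endhighlight %}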
+ + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "metadata" + - "gitops" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "my-github-username//gitops-app-source-code" + revision: '${{CF_REVISION}}' + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "kostiscodefresh/simple-web-app" + working_directory: "${{clone}}" + tags: + - "latest" + - '${{CF_SHORT_REVISION}}' + dockerfile: "Dockerfile" + stage: "build" + registry: dockerhub + enrich-image: + title: Add PR info + type: image-enricher + stage: "metadata" + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}} + BRANCH: '${{CF_BRANCH}}' + REPO: 'kostis-codefresh/simple-web-app' + GIT_PROVIDER_NAME: github-1 + jira-issue-extractor: + title: Enrich image with jira issues + type: jira-issue-extractor + stage: "metadata" + fail_fast: false + arguments: + IMAGE: docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}} + JIRA_PROJECT_PREFIX: 'SAAS' + MESSAGE: SAAS-8842 + JIRA_HOST: codefresh-io.atlassian.net + JIRA_EMAIL: kostis@codefresh.io + JIRA_API_TOKEN: '${{JIRA_TOKEN}}' + clone_gitops: + title: cloning gitops repo + type: git-clone + arguments: + repo: 'my-github-username//gitops-kubernetes-configuration' + revision: 'master' + stage: "gitops" + when: + branch: + only: + - master + change_manifest: + title: "Update k8s manifest" + image: "mikefarah/yq:3" # The image in which command will be executed + commands: + - yq w -i deployment.yml spec.template.spec.containers[0].image docker.io/kostiscodefresh/simple-web-app:${{CF_SHORT_REVISION}} + - cat deployment.yml + working_directory: "${{clone_gitops}}" + stage: "gitops" + when: + branch: + only: + - master + commit_and_push: + title: Commit manifest + type: git-commit + stage: "gitops" + arguments: + repo: 'my-github-username//gitops-kubernetes-configuration' + git: github-1 + working_directory: '/codefresh/volume/gitops-kubernetes-configuration' + commit_message: Updated manifest + git_user_name: ${{CF_COMMIT_AUTHOR}} + git_user_email: ${{CF_COMMIT_AUTHOR}}@acme-inc.com + when: + branch: + only: + - master +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. Checks out the Git repository that contains the source code +1. Builds a Docker image and tags it with the Git hash +1. Enriches the image with the Pull request and ticket information as explained in the previous sections +1. Checks out the Git repository that contains the Kubernetes manifests +1. Performs a text replacement on the manifest updating the `containers` segment with the new Docker image +1. Commits the change back using the [Git commit plugin](https://codefresh.io/steps/step/git-commit) to the Git repository that contains the manifests. + +The CD pipeline (described in the previous section) will detect that commit and use the [sync plugin](https://codefresh.io/steps/step/argocd-sync) to instruct ArgoCD to deploy the new tag. Alternatively you can setup the ArgoCD project to auto-sync on its own if it detects changes in the Git repository with the manifests. + +## Using the App-of-Apps pattern + +The GitOps dashboard has native support for the [app-of-apps pattern](https://argo-cd.readthedocs.io/en/stable/operator-manual/cluster-bootstrapping/). If you have a number of applications that are related and you always +install them as a set in your cluster you can group them in a single Application. 
The parent application can be defined using [declarative Argo Resources](https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/). + +As an example, you might find that you always install in your cluster Linkerd, Prometheus and Ambassador. You can group all of them in a single Application and deploy them all at once. + +You can find an existing example of app-of-apps at [https://github.com/argoproj/argocd-example-apps/tree/master/apps](https://github.com/argoproj/argocd-example-apps/tree/master/apps). It is using [Helm]({{site.baseurl}}/docs/yaml-examples/examples/helm/), but you can use any other Kubernetes templating mechanism such as [Kustomize]({{site.baseurl}}/docs/yaml-examples/examples/deploy-with-kustomize/) (or even plain manifests). + +Once you deploy the application with Codefresh, you will see the parent app in the dashboard with a small arrow: + +{% include image.html + lightbox="true" + file="/images/guides/gitops/app-of-apps-closed.png" + url="/images/guides/gitops/app-of-apps-closed.png" + alt="App of Apps" + caption="App of Apps" + max-width="90%" + %} + +You can expand the application by clicking on the arrow to inspect its child applications. + +{% include image.html + lightbox="true" + file="/images/guides/gitops/app-of-apps.png" + url="/images/guides/gitops/app-of-apps.png" + alt="App of Apps expanded" + caption="App of Apps expanded" + max-width="90%" + %} + + Then you can either click on the parent application or any of the children to visit the respective dashboard. In the dashboard of the parent application, you will also be notified for its components after each deployment under the "Updated Applications" header: + + {% include image.html + lightbox="true" + file="/images/guides/gitops/updated-apps.png" + url="/images/guides/gitops/updated-apps.png" + alt="Children applications" + caption="Children applications" + max-width="90%" + %} + + Note that the app of apps pattern is best used for related but not interdependent applications. If you have applications that depend on each other (e.g. frontend that needs backend and backend that needs a DB) we suggest you use the standard [Helm dependency mechanism](https://helm.sh/docs/helm/helm_dependency/). + +## Integrating Codefresh and Jira + +> Note that Codefresh currently has to provide you with access to use the Jira Marketplace App. Please get in touch for more information. + +Setting up the Codefresh Jira integration provides + +* Higher observability of deployments within your GitOps Dashboard +* Higher observability of deployments within your Jira Account + +[Our integration section]({{site.baseurl}}/docs/integrations/jira) provides further details on ways to set-up the connection. + +Once set-up, you will be able to view information from Jira in the Codefresh GitOps Dashboard. Additionally, Jira will display + +* The build status across environments +* The deployment history +* Tickets and how they correlate to deployments + +The following screenshots show examples of the provided information. Here is the deployments details for a ticket in JIRA: + +{% include image.html +lightbox="true" +file="/images/integrations/jira/jira-integration-one.png" +url="/images/integrations/jira/jira-integration-one.png" +alt="Ticket deployment history" +caption="Ticket deployment history" +max-width="90%" +%} + +And here is a complete timeline of your deployments and the feature they contain. 
+ +{% include image.html +lightbox="true" +file="/images/integrations/jira/jira-integration-two.png" +url="/images/integrations/jira/jira-integration-two.png" +alt="Jira Deployment timeline" +caption="Jira Deployment timeline" +max-width="90%" +%} + +For more information see the [Atlassian Codefresh page](https://www.atlassian.com/solutions/devops/integrations/codefresh) and the [integration documentation]({{site.baseurl}}/docs/integrations/jira/). + +## Using a Git repository for the pipelines + +Remember that according to GitOps we should place *all* application resources on Git. This means that the pipelines themselves must also be present in a Git repository and any change on them should pass from source control. + +Even though Codefresh has a [powerful inline editor]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#using-the-inline-pipeline-editor) for editing pipelines, as soon as you finish with your pipelines you [should commit them in Git](https://github.com/codefresh-contrib/gitops-pipelines) +and load them from the repository. + +{% include image.html + lightbox="true" + file="/images/guides/gitops/pipeline-from-git.png" + url="/images/guides/gitops/pipeline-from-git.png" + alt="Loading a pipeline from GIT" + caption="Loading a pipeline from GIT" + max-width="80%" + %} + + Once the pipeline is in Git, you should switch the online editor to [load the pipeline from the repository]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#loading-codefreshyml-from-version-control) instead of the inline text. + +## What to read next + +* [Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) +* [ArgoCD integration]({{site.baseurl}}/docs/integrations/argocd/) +* [Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +* [Helm promotions]({{site.baseurl}}/docs/new-helm/helm-environment-promotion/) diff --git a/_docs/clients/csdp-cli.md b/_docs/clients/csdp-cli.md deleted file mode 100644 index 2882c367..00000000 --- a/_docs/clients/csdp-cli.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: "Download CLI" -description: "" -group: clients -toc: true ---- - -You need the Codefresh CLI to install Codefresh runtimes. -* For the initial download, you also need to generate the API key and create the API authentication context, all from the UI. -* Subsequent downloads for upgrade purposes require you to only run the download command, using existing API credentials. - -### Download Codefresh CLI -Downloading the Codefresh CLI requires you to select the download mode and OS, generate an API key, and authentication context. -1. Do one of the following: - * For first-time installation, go to the Welcome page, select **+ Install Runtime**. - * If you have provisioned a hybrid/hosted runtime, in the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtime**. -1. Download the Codefresh CLI: - * Select one of the methods. - * Generate the API key and create the authentication context. - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-download-cli.png" - url="/images/getting-started/quick-start/quick-start-download-cli.png" - alt="Download CLI to install runtime" - caption="Download CLI to install runtime" - max-width="30%" - %} - -### Upgrade Codefresh CLI -Upgrade the CLI to the latest version to prevent installation errors. -1. Check the version of the CLI you have installed: - `cf version` -1. 
Compare with the [latest version](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} released by Codefresh. -1. Select and run the appropriate command: - -{: .table .table-bordered .table-hover} -| Download mode | OS | Commands | -| -------------- | ----------| ----------| -| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| -| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | -| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | -| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| -| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`| - -### Related articles -[Set up hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation) diff --git a/_docs/deployment/applications-dashboard.md b/_docs/deployments/gitops/applications-dashboard.md similarity index 89% rename from _docs/deployment/applications-dashboard.md rename to _docs/deployments/gitops/applications-dashboard.md index 1e960976..51907f81 100644 --- a/_docs/deployment/applications-dashboard.md +++ b/_docs/deployments/gitops/applications-dashboard.md @@ -1,7 +1,8 @@ --- -title: "Monitoring applications" +title: "Monitoring GitOps applications" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -27,15 +28,15 @@ Monitor the current [health and sync status of applications](#identify-applicati * [Monitor deployments for selected application](#monitor-deployments-for-selected-application) * [Monitor services for selected application](#monitor-services-for-selected-application) ->For information on creating and managing applications and application resources, see [Creating applications]({{site.baseurl}}/docs/deployment/create-application/) and [Managing applications]({{site.baseurl}}/docs/deployment/manage-application/). +>For information on creating and managing applications and application resources, see [Creating applications]({{site.baseurl}}/docs/deployments/gitops/create-application/) and [Managing applications]({{site.baseurl}}/docs/deployments/gitops/manage-application/). -### Select view mode for the Applications dashboard +## Select view mode for the Applications dashboard View deployed applications in either List (the default) or Card views. Both views are sorted by the most recent deployments. 1. In the Codefresh UI, go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard/list){:target="\_blank"}. 1. Select **List** or **Cards**. -#### Applications List view +### Applications List view Here is an example of the Applications dashboard in List view mode. @@ -49,7 +50,7 @@ caption="Applications Dashboard: List view" max-width="60%" %} -#### Applications Card view +### Applications Card view Here is an example of the Applications dashboard in Card view mode. The Card view provides a scannable view of application data and the actions to manage applications. 
{% include @@ -62,18 +63,18 @@ caption="Applications Dashboard: Card view" max-width="60%" %} -### Applications dashboard information +## Applications dashboard information Here's a description of the information and actions in the Applications dashboard. {: .table .table-bordered .table-hover} | Item | Description | | -------------- | -------------- | |Application filters | Filter by a range of attributes to customize the information in the dashboard to bring you what you need. {::nomarkdown}
  • Application state
    A snapshot that displays a breakdown of the deployed applications by their health status.
    Click a status to filter by applications that match it.
    Codefresh tracks Argo CD's set of health statuses. See the official documentation on Health sets. .
  • Application attributes
    Attribute filters support multi-selection, and results are based on an OR relationship within the same filter with multiple options, and an AND relationship between filters.
    Clicking More Filters gives you options to filter by Health status, Cluster names, Namespace, and Type.
    • Application Type: Can be any of the following
      • Applications: Standalone applications. See the official documentation on Applications.
      • ApplicationSet: Applications created using the ApplicationSet Custom Resource (CR) template. An ApplicationSet can generate single or multiple applications. See the official documentation on Generating Applications with ApplicationSet.
      • Git Source: Applications created by Codefresh that includes other applications and CI resources. See Git Sources.
    • Labels:The K8s labels defined for the applications. The list displays labels of all the applications, even if you have applied filters.
      To see the available labels, select Add, and then select the required label and one or more values.
      To filter by the labels, select Add and then Apply.
      See the official documentation on Labels and selectors.
{:/}| -|{::nomarkdown}{:/}| Star applications as favorites and view only the starred applications.{::nomarkdown}
Select the to star the application as a favorite.

To filter by favorite applications, on the filters bar, select .
{:/} TIP: If you star applications as favorites in the Applications dashboard, you can filter by the same applications in the [DORA metrics dashboard]({{site.baseurl}}/docs/reporting/dora-metrics/#metrics-for-favorite-applications). | -|Application actions| Options to monitor/manage applications through the application's context menu. {::nomarkdown}
  • Quick view
    A comprehensive read-only view of the deployment and definition information for the application.
  • {:/}See [Application Quick View](#view-deployment-and-configuration-info-for-selected-application) in this article.{::nomarkdown}
  • Synchronize/Sync
    Manually synchronize the application.
  • {:/}See [Manually sync applications]({{site.baseurl}}/docs/deployment/manage-application/#manually-synchronize-an-application).{::nomarkdown}
  • Edit
    Modify application definitions.
  • {:/}See [Edit application definitions]({{site.baseurl}}/docs/deployment/manage-application/#edit-application-definitions).{::nomarkdown}
  • Refresh and Hard Refresh: Available in Card view only. In List view, you must first select the application.
    • Refresh: Retrieve desired (Git) state, compare with the live (cluster) state, and refresh the application to sync with the desired state.
    • Hard Refresh: Refresh the application to sync with the Git state, while removing the cache.
    {:/} | +|{::nomarkdown}{:/}| Star applications as favorites and view only the starred applications.{::nomarkdown}
    Select the to star the application as a favorite.

    To filter by favorite applications, on the filters bar, select .
    {:/} TIP: If you star applications as favorites in the Applications dashboard, you can filter by the same applications in the [DORA metrics dashboard]({{site.baseurl}}/docs/reporting/dora-metrics/#metrics-for-favorite-applications). | +|Application actions| Options to monitor/manage applications through the application's context menu. {::nomarkdown}
    • Quick view
      A comprehensive read-only view of the deployment and definition information for the application.
    • {:/}See [Application Quick View](#view-deployment-and-configuration-info-for-selected-application) in this article.{::nomarkdown}
    • Synchronize/Sync
      Manually synchronize the application.
    • {:/}See [Manually sync applications]({{site.baseurl}}/docs/deployments/gitops/manage-application/#manually-synchronize-an-application).{::nomarkdown}
    • Edit
      Modify application definitions.
    • {:/}See [Edit application definitions]({{site.baseurl}}/docs/deployments/gitops/manage-application/#edit-application-definitions).{::nomarkdown}
    • Refresh and Hard Refresh: Available in Card view only. In List view, you must first select the application.
      • Refresh: Retrieve desired (Git) state, compare with the live (cluster) state, and refresh the application to sync with the desired state.
      • Hard Refresh: Refresh the application to sync with the Git state, while removing the cache.
      {:/} | -### Identify applications with warnings/errors +## Identify applications with warnings/errors Errors are flagged in the **Warnings/Errors** button, displayed at the top right of the Applications dashboard. Clicking the button shows the list of applications with the warnings/errors and the possible reasons for these. {% include @@ -97,7 +98,7 @@ All errors are Argo CD-generated errors. Codefresh generates custom warnings for
      {:/} -#### Warning: Missing Rollouts reporter in cluster +### Warning: Missing Rollouts reporter in cluster **Reason**: Codefresh has detected that Argo Rollouts is not installed on the target cluster. Rollout instructions are therefore not executed and the application is not deployed. Applications with `rollout` resources need Argo Rollouts on the target cluster, both to visualize rollouts in the Applications dashboard and control rollout steps with the Rollout Player. @@ -108,7 +109,7 @@ Applications with `rollout` resources need Argo Rollouts on the target cluster,
      {:/} -#### Warning: Long sync +### Warning: Long sync **Reason**: Ongoing sync for application exceeds 30 minutes (Argo CD's default duration for a sync operation). **Corrective Action**: @@ -119,7 +120,7 @@ Applications with `rollout` resources need Argo Rollouts on the target cluster, * Drill down into the application to investigate the issue and make changes. -### View deployment and configuration info for selected application +## View deployment and configuration info for selected application View deployment, definition, and event information for the selected application in a centralized location through the Quick View. A read-only view, the Quick View displays information on the application state and location, labels and annotations, parameters, sync options, manifest, status and sync events. @@ -165,7 +166,8 @@ max-width="50%" -##### Quick View: Summary +### Quick View: Summary + Displays health, sync status, and source and destination definitions. {% include @@ -178,7 +180,9 @@ caption="Application Quick View: Summary" max-width="30%" %} -##### Quick View: Metadata + +### Quick View: Metadata + Displays labels and annotations for the application. {% include @@ -191,7 +195,9 @@ caption="Application Quick View: Metadata" max-width="30%" %} -##### Quick View: Parameters + +### Quick View: Parameters + Displays parameters configured for the application, based on the tool used to create the application's manifests. The parameters displayed differ according to the tool: `directory` (as in the screenshot below), `Helm` charts, or `Kustomize` manifests, or the specific plugin. @@ -205,7 +211,8 @@ caption="Application Quick View: Parameters" max-width="30%" %} -##### Quick View: Sync Options +### Quick View: Sync Options + Displays sync options enabled for the application. {% include @@ -218,7 +225,8 @@ caption="Application Quick View: Parameters" max-width="30%" %} -##### Quick View: Manifest +### Quick View: Manifest + Displays the YAML version of the application manifest. {% include @@ -231,7 +239,8 @@ caption="Application Quick View: Manifest" max-width="30%" %} -##### Quick View: Events +### Quick View: Events + Displays status and sync events for the application. {% include @@ -244,7 +253,7 @@ caption="Application Quick View: Events" max-width="30%" %} -### Monitor health and sync statuses for selected application +## Monitor health and sync statuses for selected application Monitor the health status of the selected application, the current sync status, and the result of the previous sync operation. Once you select an application, the quickest option to monitor statuses is through the application header which is always displayed, no matter what tab you navigate to. @@ -274,7 +283,7 @@ max-width="40%" You can also view the current health and sync status for the application as a resource in the Current State tab. -### Monitor resources for selected application +## Monitor resources for selected application Monitor the resources deployed in the current version of the selected application in the Current State tab. Selecting an application from the Applications dashboard takes you to the Current State tab, which as its title indicates, displays the @@ -302,7 +311,7 @@ You can view application resources in [List or Tree views](#view-modes-for-appli > To quickly see which resources have been added, modified, or removed for the current or for a specific deployment, switch to the Timeline tab and expand the deployment record to show Updated Resources. 
See [Monitor resource updates for deployments](#monitor-resource-updates-for-deployments). -#### View modes for application resources +### View modes for application resources The Current State tab supports Tree and List view formats. * Tree view (default): A hierarchical, interactive visualization of the application and its resources. Useful for complex deployments with multiple clusters and large numbers of resources. See also [Working with resources in Tree view](#working-with-resources-in-tree-view). @@ -335,7 +344,7 @@ max-width="50%" -##### Working with resources in Tree view +#### Working with resources in Tree view The Tree view is designed to impart key information at a glance. Review the sections that follow for pointers to quickly get to what you need in the Tree view. **Context menu** @@ -415,7 +424,7 @@ max-width="50%" %} -#### Filters for application resources +### Filters for application resources Filters are common to both Tree and List views, and when applied are retained when switching between views. `IgnoreExtraneous` is a filter in [Argo CD](https://argo-cd.readthedocs.io/en/stable/user-guide/compare-options){:target="\_blank"} that allows you to hide specific resources from the Current State views. These resources are usually generated by a tool and their sync statuses have no impact on the sync status of the application. For example, `ConfigMap` and `pods`. The application remains in-sync even when such resources are syncing or out-of-sync. @@ -450,7 +459,7 @@ max-width="50%" %} -#### Health status for application resources +### Health status for application resources View and monitor health status of the selected application's resources in the Current State tab, in Tree or List views. Identify the health of an application resource through the color-coded border and the resource-type icon (Tree view), or the textual labels at the right of the resource (List view). @@ -458,18 +467,20 @@ Identify the health of an application resource through the color-coded border an {: .table .table-bordered .table-hover} | Health status | Description | Display in Tree view | | -------------- | ------------| ------------------| -| **Healthy** | Resource is functioning as required. | {::nomarkdown}{:/} | -| **Progressing** | Resource is not healthy but can become healthy before the timeout occurs.| {::nomarkdown}{:/} | -| **Suspended** | Resource is not functioning, and is either suspended or paused. For example, Cron job or a canary rollout.| {::nomarkdown}{:/} | + +| **Healthy** | Resource is functioning as required. | {::nomarkdown}{:/} | +| **Progressing** | Resource is not healthy but can become healthy before the timeout occurs.| {::nomarkdown}{:/} | +| **Suspended** | Resource is not functioning, and is either suspended or paused. For example, Cron job or a canary rollout.| {::nomarkdown}{:/} | | **Missing** | Resource is not present on the cluster. |{::nomarkdown}{:/} | -| **Degraded** | Resource is not healthy, or a timeout occurred before it could reach a healthy status.| {::nomarkdown}{:/} | -| **Unknown** | Resource does not have a health status, or the health status is not tracked in Argo CD. For example,`ConfigMaps` resource types. | {::nomarkdown}{:/} | +| **Degraded** | Resource is not healthy, or a timeout occurred before it could reach a healthy status.| {::nomarkdown}{:/} | +| **Unknown** | Resource does not have a health status, or the health status is not tracked in Argo CD. For example,`ConfigMaps` resource types. 
| {::nomarkdown}{:/} | + See also [Argo CD's set of health checks](https://argo-cd.readthedocs.io/en/stable/operator-manual/health/){:target="\_blank"}. -#### Sync status for application resources +### Sync status for application resources Similar to the health status, the Current State also tracks the sync status of all application resources. The sync status identifies if the live state of the application resource on the cluster is synced with its desired state in Git. Identify the sync status through the icon on the left of the resource name and the color of the resource name (Tree view), or the textual labels at the right of the resource (List view). @@ -479,15 +490,17 @@ The table describes the possible sync statuses for an application resource, and {: .table .table-bordered .table-hover} | Sync state | Description |Display in Tree view | | -------------- | ---------- | ---------- | -| **Synced** | The live state of the resource on the cluster is identical to the desired state in Git.| {::nomarkdown}{:/} | -| **Syncing** | The live state of the resource was not identical to the desired state, and is currently being synced.| {::nomarkdown}{:/} | -| **Out-of-Sync** | {::nomarkdown}The live state is not identical to the desired state.
      To sync a resource, select the Sync option from the resource's context menu in Tree view. {:/}| {::nomarkdown}{:/} | -| **Unknown** | The sync status could not be determined. | {::nomarkdown}{:/} | + +| **Synced** | The live state of the resource on the cluster is identical to the desired state in Git.| {::nomarkdown}{:/} | +| **Syncing** | The live state of the resource was not identical to the desired state, and is currently being synced.| {::nomarkdown}{:/} | +| **Out-of-Sync** | {::nomarkdown}The live state is not identical to the desired state.
      To sync a resource, select the Sync option from the resource's context menu in Tree view. {:/}| {::nomarkdown}{:/} | +| **Unknown** | The sync status could not be determined. | {::nomarkdown}{:/} | + > The application header displays the statuses of the current and previous sync operations. Clicking **More** opens the Sync panels with Sync Info, Sync Result and Commit Info. The Application Warnings/Errors panel surfaces sync errors on exceeding the maximum number of retries and when a sync operation extends beyond 30 minutes. -#### Manifests for application resources +### Manifests for application resources In either Tree or List views, double-click an application resource to see its manifests. The manifests are displayed in the Summary tab. > Based on the selected resource type, you can also view logs, and events. Endpoints for example show only manifests, while pods show manifests, logs, and events. @@ -519,7 +532,7 @@ Here's what you can see and do in the Summary tab:
      {:/} -#### Logs for application resources +### Logs for application resources In either Tree or List views, double-click an application resource to see its logs. Logs are available only for resource types such as pods. {% include @@ -541,7 +554,7 @@ max-width="50%"
      {:/} -#### Events for application resources +### Events for application resources In either Tree or List views, double-click an application resource to see events in the Events tab. > If your runtime is lower than the version required to view events, you are notified to upgrade to the required version. @@ -563,7 +576,7 @@ max-width="50%" -### Monitor deployments for selected application +## Monitor deployments for selected application Monitor an ongoing deployment for the selected application, and review its historical deployments. The Timeline tab displays the history of deployments for the selected application, sorted by the most recent deployment (default), labeled **Current Version** at the top. @@ -600,7 +613,7 @@ caption="Applications Dashboard: Deployment chart" max-width="30%" %} -#### Monitor CI details by deployment +### Monitor CI details by deployment Each deployment record displays the complete CI history for that deployment. @@ -611,7 +624,7 @@ Each deployment record displays the complete CI history for that deployment. * The **Committer** who made the changes. -#### Monitor updated resources by deployment +### Monitor updated resources by deployment Each deployment record also identifies the resources that were changed (created, updated, or removed) as part of that deployment in **Updated Resources**. You can trace the history of a resource, from the original to their final versions. For each version, you can see the actual change or changes through the Diff view. The Full View shows the complete resource manifest, with the diff view of the changes, while the Compact View shows only those lines with the changes. > For detailed information on the current state of a resource, switch to the Current State tab and click the resource node. See [Monitoring application resources](#monitoring-application-resources). @@ -657,15 +670,15 @@ max-width="70%" -#### Monitor rollouts by deployment +### Monitor rollouts by deployment A rollout is initiated when there is an Argo CD sync due to a change in the desired state. Visualize ongoing and completed rollouts by deployments in **Services**. -> To view and manage a rollout, you must have an Argo `rollout` resource defined for your application, and [install Argo Rollouts in the cluster]({site.baseurl}}/docs/_docs/deployment/install-argo-rollouts). +> To view and manage a rollout, you must have an Argo `rollout` resource defined for your application, and [install Argo Rollouts in the cluster]({site.baseurl}}/docs/deployments/gitops/install-argo-rollouts). For detailed information on Argo Rollouts, see [Argo Rollouts documentation](https://argoproj.github.io/argo-rollouts/){:target="\_blank"}. -##### Rollout progress +#### Rollout progress For an ongoing rollout, the rollout bar displays the progress of the rollout. You can also visualize the steps in the rollout, and control the rollout using the options in the Rollout Player. Here is an example of an ongoing rollout for a canary deployment in Updated Services. The rollout comprising four steps has not started, and no traffic has not been routed as yet to the new version of the application. @@ -693,7 +706,7 @@ caption="Rollout completed for deployment" max-width="50%" %} -##### Manage ongoing rollout +#### Manage ongoing rollout Click the rollout name to visualize its steps. Manually manage the rollout through the controls in the Rollout Player. 
Here you can see that two out of four steps have been completed, 25% of the traffic has been routed, and the rollout has been paused for the defined length of time. @@ -720,7 +733,7 @@ The table lists the controls in the Rollout Player to manage an ongoing rollout. -##### View analysis run +#### View analysis run If you have defined an analysis template for the rollout, you can check the run results and the manifest. The result of an analysis run determines if the rollout is completed, paused, or aborted. For detailed information, see the [Analysis section in Argo Rollouts](https://argoproj.github.io/argo-rollouts/features/analysis/){:target="\_blank"}. @@ -749,7 +762,7 @@ max-width="50%" %} -### Monitor services for selected application +## Monitor services for selected application The Services tab shows the K8s services for each deployment of the application. Each service shows the number of replicas, the endpoint IP, the labels that reference the application, and the health status. @@ -765,6 +778,11 @@ caption="Applications Dashboard: Services tab" max-width="50%" %} +## Related articles +[Creating GitOps applications]({{site.baseurl}}/docs/deployments/gitops/create-application) +[Managing GitOps applications]({{site.baseurl}}/docs/deployments/gitops/manage-applications) +[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) +[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) diff --git a/_docs/deployment/create-application.md b/_docs/deployments/gitops/create-application.md similarity index 96% rename from _docs/deployment/create-application.md rename to _docs/deployments/gitops/create-application.md index 47aea2d7..27425172 100644 --- a/_docs/deployment/create-application.md +++ b/_docs/deployments/gitops/create-application.md @@ -1,7 +1,8 @@ --- -title: "Creating applications" +title: "Creating GitOps applications" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -19,7 +20,7 @@ Codefresh provides all the options and functionality to create and manage Argo C * Edit and delete applications Once the application is created and synced to the cluster, it is displayed in the Applications dashboard. Here, you can select an application to update the application's configuration settings, or delete it. - To monitor the health and sync status, deployments, and resources for the application, see [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/). + To monitor the health and sync status, deployments, and resources for the application, see [Monitoring GitOps applications]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/). ### Application: Definitions Application definitions include the name, runtime, and the name of the YAML manifest. By default, the YAML manifest has the same name as that of the application. 
@@ -225,7 +226,7 @@ Track the application in the [Applications dashboard](https://g.codefresh.io/2.0 ### Related articles -[Monitoring applications]({{site.baseurl}})/docs/deployment/applications-dashboard) -[Managing applications]({{site.baseurl}})/docs/deployment/manage-applications) -[Home dashboard]({{site.baseurl}})docs/reporting/home-dashboard) -[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) \ No newline at end of file +[Monitoring GitOps applications]({{site.baseurl}})/docs/deployments/gitops/applications-dashboard) +[Managing GitOps applications]({{site.baseurl}})/docs/deployments/gitops/manage-applications) +[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) +[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) \ No newline at end of file diff --git a/_docs/deployment/images.md b/_docs/deployments/gitops/images.md similarity index 92% rename from _docs/deployment/images.md rename to _docs/deployments/gitops/images.md index d4538a52..80984226 100644 --- a/_docs/deployment/images.md +++ b/_docs/deployments/gitops/images.md @@ -1,7 +1,8 @@ --- title: "Images in Codefresh" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -18,7 +19,7 @@ Complete the mandatory steps to see your Images in the Codefresh UI. Each step h 1. (Mandatory) Report image information to Codefresh. See the [report-image-info](https://github.com/codefresh-io/argo-hub/blob/main/workflows/codefresh-csdp/versions/0.0.6/docs/report-image-info.md){:target="\_blank"} example. -> If you are using an external GitHub Actions-based pipeline, we have a new template that combines image reporting and enrichment. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). +> If you are using an external GitHub Actions-based pipeline, we have a new template that combines image reporting and enrichment. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/gitops/image-enrichment-overview/). ### Image views in Codefresh * In the Codefresh UI, go to [Images](https://g.codefresh.io/2.0/images){:target="\_blank"}. @@ -111,3 +112,10 @@ Selecting **more details** for an image tag. | **3** | The Git details for this image tag, such as the Git hash, the Jira issue number, Git Pull Request, commit information, the name of the user who performed the commit. | | **4** | The workflow for the image step. Select to go to the Workflow.| | **5** | The log information for the build image step in the relevant workflow. Select to view Logs panel. 
| + +## Related articles + +[Creating GitOps applications]({{site.baseurl}}/docs/deployments/gitops/create-application) +[Managing GitOps applications]({{site.baseurl}}/docs/deployments/gitops/manage-applications) +[Image enrichment with integrations]({{site.baseurl}}/integrations/image-enrichment-overview) +[Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) diff --git a/_docs/deployment/install-argo-rollouts.md b/_docs/deployments/gitops/install-argo-rollouts.md similarity index 74% rename from _docs/deployment/install-argo-rollouts.md rename to _docs/deployments/gitops/install-argo-rollouts.md index 22f6afb7..0847b58c 100644 --- a/_docs/deployment/install-argo-rollouts.md +++ b/_docs/deployments/gitops/install-argo-rollouts.md @@ -1,12 +1,13 @@ --- -title: "Install Argo Rollouts" +title: "Progressive delivery with GitOps" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- -Install Argo Rollouts on managed clusters with a single click. With Argo Rollouts installed on your cluster, you can visualize rollout progress for deployed applications in the [Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/#rollout-progress-visualization). +Install Argo Rollouts on managed clusters with a single click. With Argo Rollouts installed on your cluster, you can visualize rollout progress for deployed applications in the [Applications dashboard]({{site.baseurl}}/docs/deployments/gitops/applications-dashboard/#rollout-progress-visualization). If Argo Rollouts has not been installed, an **Install Argo Rollouts** button is displayed on selecting the managed cluster. 1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. @@ -24,4 +25,4 @@ If Argo Rollouts has not been installed, an **Install Argo Rollouts** button is %} ### Related articles -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) \ No newline at end of file +[Add external clusters to runtimes]({{site.baseurl}}/docs/installation/managed-cluster/) \ No newline at end of file diff --git a/_docs/deployment/manage-application.md b/_docs/deployments/gitops/manage-application.md similarity index 96% rename from _docs/deployment/manage-application.md rename to _docs/deployments/gitops/manage-application.md index 2ccd3e16..15a0049d 100644 --- a/_docs/deployment/manage-application.md +++ b/_docs/deployments/gitops/manage-application.md @@ -1,7 +1,8 @@ --- -title: "Managing applications" +title: "Managing GitOps applications" description: "" -group: deployment +group: deployments +sub_group: gitops toc: true --- @@ -49,8 +50,8 @@ Update General or Advanced configuration settings for a deployed application thr {:start="3"} 1. Update the **General** or **Advanced** configuration settings as needed: - [General configuration]({{site.baseurl}}/docs/deployment/create-application/#application-general-configuration-settings) - [Advanced configuration]({{site.baseurl}}/docs/deployment/create-application/#application-advanced-configuration-settings) + [General configuration]({{site.baseurl}}/docs/deployments/gitops/create-application/#application-general-configuration-settings) + [Advanced configuration]({{site.baseurl}}/docs/deployments/gitops/create-application/#application-advanced-configuration-settings) When you change a setting, the Commit and Discard Changes buttons are displayed. 
{% include @@ -218,7 +219,7 @@ For example, if you made changes to `api` resources or `audit` resources, type ` Delete an application from Codefresh. Deleting an application deletes the manifest from the Git repository, and then from the cluster where it is deployed. When deleted from the cluster, the application is removed from the Applications dashboard in Codefresh. >**Prune resources** in the application's General settings determines the scope of the delete action. -When selected, both the application and its resources are deleted. When cleared, only the application is deleted. For more information, review [Sync settings]({{site.baseurl}}/docs/deployment/create-application/#sync-settings). +When selected, both the application and its resources are deleted. When cleared, only the application is deleted. For more information, review [Sync settings]({{site.baseurl}}/docs/deployments/gitops/create-application/#sync-settings). Codefresh warns you of the implication of deleting the selected application in the Delete form. 1. In the Codefresh UI, go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard/list){:target="\_blank"}. @@ -351,10 +352,9 @@ The table describes the options for the `Rollout` resource. ### Related articles -[Creating applications]({{site.baseurl}}/docs/deployment/create-application) +[Creating GitOps applications]({{site.baseurl}}/docs/deployments/gitops/create-application) [Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard) -[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics) - +[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics) diff --git a/_docs/deployments/helm/custom-helm-uploads.md b/_docs/deployments/helm/custom-helm-uploads.md new file mode 100644 index 00000000..46da0a4d --- /dev/null +++ b/_docs/deployments/helm/custom-helm-uploads.md @@ -0,0 +1,125 @@ +--- +title: "Creating and uploading Helm packages" +description: "Manually create and upload Helm packages" +group: deployments +sub_group: helm +redirect_from: + - /docs/create-helm-artifacts-using-codefresh-pipeline/ +toc: true +--- + +Helm packages are just TAR files. Helm repositories are simple file hierarchies with an extra [index.yaml](https://helm.sh/docs/developing_charts/#the-chart-repository-structure){:target="\_blank"}. +You can run custom commands and manually upload indexes and packages to a Helm repo. + +>This articles shows some non-standard Helm examples. + For the basic use cases, or if you are just getting started with Helm, see our [Helm quick start guide]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/) and [Using Helm in CI pipelines]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + +## Package a Helm chart +Below is an example of a freestyle step in a Codefresh pipeline that packages the Helm chart and then extracts the chart name from the command output. It also saves that package name in an environment variable for later use. + + `YAML` +{% highlight yaml %} +{% raw %} +helm_package: + image: devth/helm + commands: + - cf_export PACKAGE=$(helm package | cut -d " " -f 8) +{% endraw %} +{% endhighlight %} + +The `helm package` command expects a path to an unpacked chart. Replace `` in the example with the directory that holds your chart files. Note that this directory must have the same name as the chart name, as per Helm requirements.
+See [Helm package docs](https://github.com/kubernetes/helm/blob/master/docs/helm/helm_package.md){:target="_blank"} and [Helm charts overview](https://github.com/kubernetes/helm/blob/master/docs/charts.md){:target="_blank"} for more information.
+
+{{site.data.callout.callout_info}}
+To use `cf_export` and make the variable available to other steps in the pipeline, see [Variables in pipelines]({{site.baseurl}}/docs/pipelines/variables).
+{{site.data.callout.end}}
+
+## Example 1: Push the chart to a GCS-based Helm Repository
+The first example pushes the packaged chart into a public cloud storage service, like AWS S3, Azure Storage, or Google Cloud Storage. We chose Google Cloud Storage (GCS) for this example.
+Our pipeline has three steps:
+
+{:start="1"}
+1. download_index: download the Helm `index.yaml` file from GCS, or create one if it's not there.
+
+{:start="2"}
+2. helm_package_merge: package the chart as described earlier, and also merge the new package into the downloaded `index.yaml` file, using the `helm repo index --merge` command.
+
+{:start="3"}
+3. push_gcs: upload the updated `index.yaml` file and the newly created package to GCS.
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+steps:
+  download_index:
+    image: appropriate/curl:latest
+    commands:
+      - 'curl https://storage.googleapis.com/$GOOGLE_BUCKET_NAME/index.yaml --output ./index.yaml --fail || :'
+      - '[ ! -f ./index.yaml ] && echo "apiVersion: v1">./index.yaml'
+  helm_package_merge:
+    image: devth/helm
+    commands:
+      - cf_export PACKAGE=$(helm package | cut -d " " -f 8)
+      - helm repo index --merge ./index.yaml .
+  push_gcs:
+    image: camil/gsutil
+    commands:
+      - echo -E $GOOGLE_CREDENTIALS > /gcs-creds.json
+      - echo -e "[Credentials]\ngs_service_key_file = /gcs-creds.json\n[GSUtil]\ndefault_project_id = $GOOGLE_PROJECT_ID" > /root/.boto
+      - gsutil cp ./index.yaml gs://$GOOGLE_BUCKET_NAME
+      - gsutil cp $PACKAGE gs://$GOOGLE_BUCKET_NAME
+{% endraw %}
+{% endhighlight %}
+
+
+### Environment setup
+
+This pipeline references some predefined environment variables such as `GOOGLE_BUCKET_NAME`, `GOOGLE_PROJECT_ID` and `GOOGLE_CREDENTIALS`.
+For this example, we created a service account with appropriate permissions in Google Cloud, and saved the credentials into `GOOGLE_CREDENTIALS` as a Codefresh Secret.
+For more information, see [Authenticating with Google services](https://cloud.google.com/storage/docs/authentication#service_accounts){:target="_blank"} and
      +[Codefresh pipeline configuration and secrets](https://codefresh.io/docs/docs/codefresh-yaml/variables/#user-provided-variables){:target="_blank"}. + +## Example 2: Push the chart to Chart Museum +Chart Museum is a Helm repository *server* that has an HTTP API, pluggable backends, authentication, and more. +Read more about [Chart Museum](https://github.com/kubernetes-helm/chartmuseum){:target="_blank"}. + +In this example, we already have a Chart Museum server running, so we'll push the packaged chart to it. + +The steps will be: + +{:start="1"} +1. helm_package: package the chart as described earlier. + +{:start="2"} +2. get_repo_url: In order to avoid hard-coding the repository URL into the pipeline, we will retrieve it from the Codefresh Helm integration. +In this case, we have added our repository with Codefresh as described in [Using external Helml repos in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories). +Replace `` in the example with the name you gave to your repository when you added it to Codefresh. + +{:start="3"} +3. helm_push: call the Chart Museum HTTP api to just upload the package. Chart Museum will take care of the rest. + + `YAML` +{% highlight yaml %} +{% raw %} +steps: + helm_package: + image: devth/helm + commands: + - cf_export PACKAGE=$(helm package | cut -d " " -f 8) + get_repo_url: + image: codefresh/cli:latest + commands: + - cf_export HELM_URL=$(codefresh get ctx -o=yaml | grep repositoryUrl | cut -d "'" -f 2) + helm_push: + image: appropriate/curl + commands: + - curl --data-binary "@$PACKAGE" $HELM_URL/api/charts +{% endraw %} +{% endhighlight %} + + +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Using a managed Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +[Helm environment promotion]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) diff --git a/_docs/deployments/helm/helm-charts-and-repositories.md b/_docs/deployments/helm/helm-charts-and-repositories.md new file mode 100644 index 00000000..8edfe049 --- /dev/null +++ b/_docs/deployments/helm/helm-charts-and-repositories.md @@ -0,0 +1,111 @@ +--- +title: "Using external Helml repos in Codefresh pipelines" +description: "Use external Helm Charts and repositories in Codefresh pipelines" +group: deployments +sub_group: helm +toc: true +--- +Codefresh allows you to integrate with external Helm repositories and Helm charts in the Helm Charts page. +It is optional to use external Helm repositories as all Codefresh accounts already include a [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/). + +## Add an external Helm repository + +Easily add your own Helm charts. +By default, we show charts from the [official Helm repository](https://github.com/kubernetes/charts){:target="_blank"}. + +1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Helm Charts**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. +1. On the top right, click **Add Existing Helm Repository**. + You are taken to Pipeline Integrations. +1. In the Integrations page, click **Add Helm Repository**, and then select the type of Helm repo to add from the list. +1. Enter the **Helm repository name** and **URL**. + Do not include the specific path to `index.yaml` in the URL. 
+ +{% include image.html +lightbox="true" +file="/images/deployments/helm/quick-helm-integration.png" +url="/images/deployments/helm/quick-helm-integration.png" +alt="Adding a Helm repository" +caption="Adding a Helm repository" +max-width="70%" +%} + +1. If your repository doesn't require authentication, to complete the process, click **Save**. + +For more details on adding Helm repositories, see [Helm integrations]({{site.baseurl}}/docs/integrations/helm/). + +## Use a Helm repository in a Codefresh pipeline + +Once connected, inject any Helm repository context into Codefresh pipelines. + +1. From the Pipelines page, select the pipeline into which to import the Helm configuation. +1. In the Workflows tab, do one of the following: + * Click **Variables** on the right, and then click the **Settings** (gear) icon. + * Click the context menu next to the settings icon. +1. Click on **Import from/Add shared configuration**, and select the name of the repository. + The repository settings are injected as environment variables into the pipeline. + +{% include image.html +lightbox="true" +file="/images/deployments/helm/connect-helm-repo.png" +url="/images/deployments/helm/connect-helm-repo.png" +alt="Connecting a Helm repository in the pipeline" +caption="Connecting a Helm repository in the pipeline" +max-width="70%" +%} + +1. If you are using the Helm step, the step uses these settings to connect to your authenticated repository automatically. For details, see [Using Helm in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + +## Install a chart from your Helm repository +Install a chart from a Helm repository to your cluster. + +* Values in the Chart Install wizard are provided in the following order: + 1. Chart Default Values (implicitly part of the chart). + 2. Overridden default values (provided as values file, provided only if edited by the user). + 3. Supplied values files from Yaml Shared Configuration. + 4. Override variables are provided as `--set` arguments. +* Variables available for custom pipelines: + If you select a custom pipeline, the following variables are available: + * `CF_HELM_RELEASE` - name of release + * `CF_HELM_KUBE_CONTEXT` - kubectl context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/#work-with-your-services)) + * `CF_HELM_INSTALLATION_NAMESPACE` - desired namespace for the release + * `CF_HELM_CHART_VERSION` - Chart Version, + * `CF_HELM_CHART_NAME` - Chart Name + * `CF_HELM_CONTEXTS` - values from [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/#using-shared-helm-values) + * `CF_HELM_VALUES` - extra values + * `CF_HELM_SET` - extra values, + * `CF_HELM_CHART_REPO_URL` - URL of Chart repository + * `CF_HELM_COMMIT_MESSAGE` - Message to show in Helm GUI, + +
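+
+If you do select a custom pipeline, a minimal sketch of what it could look like is shown below. It simply feeds the variables listed above back into the Codefresh Helm step; this is an illustration only, not the built-in install pipeline:
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  install_selected_chart:
+    title: Installing the chart selected in the Helm Charts page
+    type: helm
+    arguments:
+      action: install
+      # every value below comes from the CF_HELM_* variables listed above
+      chart_name: ${{CF_HELM_CHART_NAME}}
+      chart_version: ${{CF_HELM_CHART_VERSION}}
+      chart_repo_url: ${{CF_HELM_CHART_REPO_URL}}
+      release_name: ${{CF_HELM_RELEASE}}
+      kube_context: ${{CF_HELM_KUBE_CONTEXT}}
+      namespace: ${{CF_HELM_INSTALLATION_NAMESPACE}}
+{% endraw %}
+{% endhighlight %}
+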
      + +**Before you begin** +* Make sure tht you have a Kubernetes integration with the cluster and namespace, as described [here]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/) + +**How to** +1. In the Codefresh UI, from the Artifacts section in the sidebar, select [**Helm Charts**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. +1. In the row with the chart to install, click **Install**. +1. Enter the **Release name** for the chart, and select the **Chart version** to install. +1. From Cluster Information, select a Kubernetes **Cluster** and the **Namespace** to install to. +1. Select the **Pipeline** to install to. +1. If required, edit the **Default Chart Values** to view and override them. + When the default values yaml is changed, it is provided to Helm install as a values file. You can revert to the original values cby clicking Revert. +1. To provide additional values files, do the following: + * From the **Import from configuration** list, select **Add new context of type: YAML**. + * Enter the **Context name**. + * Insert your values YAML, and click **Save**. + The YAML is saved and added to the list of configuration files that you can import from. +1. To override variable values, click **+Add variable**, and then enter the Key and Value. + > The order of value configurations matter for Helm: most recently provided values override earlier ones. +1. Click **Install**. You can observe the newly installed release in Helm Releases. + +You can also install Helm releases from [any Helm environment board]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion). + + +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Helm integrations]({{site.baseurl}}/docs/integrations/helm/) +[Helm Dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) +[Helm Promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) +[Helm best practices]({{site.baseurl}}/docs/ci-cd-guides/helm-best-practices/) + + diff --git a/_docs/deployments/helm/helm-environment-promotion.md b/_docs/deployments/helm/helm-environment-promotion.md new file mode 100644 index 00000000..21466e5d --- /dev/null +++ b/_docs/deployments/helm/helm-environment-promotion.md @@ -0,0 +1,290 @@ +--- + +title: "Promoting Helm Environments" +description: "Manage your Helm Environments with the Codefresh UI" +group: deployments +sub_group: helm +toc: true +--- +Apart from the [Helm Releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) that show your Kubernetes clusters at the application level, Codefresh also comes with a special environment board that allows you to track one or more applications as they move within your infrastructure (example, Dev, QA, Prod). + +The environment board can function both as an overview of the whole lifecycle of the application, as well as a tool to shift-left/right Helm releases between environments. 
+ +Here is an example board: + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/board.png" +url="/images/deployments/helm/promotion/board.png" +alt="Helm Environment Dashboard" +caption="Helm Environment Dashboard" +max-width="80%" +%} + +This board has three environments that correspond to Kubernetes clusters: + * A Load-testing environment where applications are stress-tested + * A Staging environment where smoke tests are performed + * The Production environment where applications go live + +You can see that a Python example app at version 0.2.0 is already in production. Version 0.3.0 is waiting in the staging environment for smoke tests. Once it is tested it can be dragged to the production column therefore *promoting* it to production status. + + +## Using the Helm Environment Board + +You can create and manage as many Helm promotion boards as you want. +For each board, you define how many columns it will contain, where each column is a Helm-enabled Kubernetes cluster. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/helm-environments.png" +url="/images/deployments/helm/promotion/helm-environments.png" +alt="Helm environments column structure" +caption="Helm environments column structure" +max-width="80%" +%} + +You can use different clusters for each column or different namespaces from the same cluster. You can even mix and match both approaches. +As an example, you could create a Helm board with the following environments: + +* Column 1, dev cluster showing all namespaces (DEV) +* Column 2, namespace qa from cluster staging (QA) +* Column 3, namespace staging from cluster staging (STAGING) +* Column 4, namespace production from cluster prod (PRODUCTION) + +Once you have your columns in place, you can move Helm releases between clusters/namespaces by drag-n-drop. Each Helm release can be dragged to any other column either promoting it, for example, from QA to Production, or shifting it left, for example, from Production to QA. + +## Creating a custom Helm Board + +Create your own Helm board with a single or multiple Helm applications. You can create as many boards as you want. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Boards**](https://g.codefresh.io/helm/helm-kanban/){:target="\_blank"}. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/board-selection.png" +url="/images/deployments/helm/promotion/board-selection.png" +alt="Helm board selection" +caption="Helm board selection" +max-width="80%" +%} + +{:start="2"} +1. On the top-right, click **Add board**. +1. Enter the title of your board as the **Board Name**. +1. Optional. In the **Release name regex expression** field, enter the Regex expression for this board to filter all its environments to show only Helm releases that match this regular expression. + Regex expressions are very helpful if you want your environment board to focus only on a single or set of Helm applications. + To see all Helm releases of your clusters, leave empty. + +You can edit both options for an existing board if you change your mind later. + +### Define Clusters/Namespaces for each Environment + +Once you create your Helm environment board, you are ready to define its columns. + +* To add a column, on the top-right, click **Add environment***. 
+ You will see the environment details dialog: + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/edit-helm-environment.png" +url="/images/deployments/helm/promotion/edit-helm-environment.png" +alt="Edit Helm environment" +caption="Edit Helm environment" +max-width="50%" +%} + + For each environment you can select: + * A name for that column + * The Kubernetes cluster it corresponds to + * One or more namespaces that define this environment (You can even toggle the switch for a regex match) + * A custom pipeline that will be used when a Helm release is installed for the first time in this column + * A custom pipeline that will be used when a Helm release is dragged in this column (promoted from another column) + * Optional. One or more charts to use for the environment. Defining charts for the environment saves you from having to search through all the charts in your Helm repository. When you install an application from the install graphical dialog, only the selected chart(s) are displayed. + * A presentation color to easily identify the environment on the board (For example, a "production" environment should have a red color) + +You can also select no namespace at all. In that case, the column will show Helm releases for all namespaces in that cluster. +You can change all these options after creation, so feel free to change your mind. + +Repeat the same process for additional environments. Remember that you can name your environment as you want and define any combination of cluster/namespace for any of the columns. This gives you a lot of power to define a Helm environment board that matches exactly your own process. + +You don't have to define the environments in order. You can drag-n-drop columns to change their order after the initial creation. + + +### Installing Helm Releases on each Environment + +If you already have [pipelines that deploy Helm releases]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/), your columns are populated automatically with information. + +For each Helm release, you will get some basic details such as the chart version and the name of the release. You can expand a release by clicking on the arrow button to get additional information such as the docker images and the replicas of each pod that are contained in the release. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/expand.png" +url="/images/deployments/helm/promotion/expand.png" +alt="Helm release details" +caption="Helm release details" +max-width="50%" +%} + +You can even install manually a Helm release from any external repository by clicking on the *PLUS* button at the header of each column. In that case you will see a list of possible Helm applications to choose from. + +You will be able to select the target cluster and namespace as well as the chart values [as any other Helm release]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#install-chart-from-your-helm-repository). + + +## Moving Releases between Environments + +A Helm environment board can be used by different stakeholders in order to get the detailed status of all defined environments. In that aspect it can act as a read-only tool that simply shows the results of Codefresh pipelines that deploy Helm applications. + +### Promoting Helm Releases with the UI + +You can also use the board as an action tool in order to promote/demote a Helm release between individual environments. 
To move a Helm release between environments just drag-n-drop it to a different column. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/shift-right.png" +url="/images/deployments/helm/promotion/shift-right.png" +alt="Promoting a Helm release" +caption="Promoting a Helm release" +max-width="80%" +%} + +Once you drop the release you will also see the promotion dialog. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/promote-settings.png" +url="/images/deployments/helm/promotion/promote-settings.png" +alt="Promotion Settings" +caption="Promotion Settings" +max-width="40%" +%} + +All fields here will be auto-filled according to the Helm release that you dragged. You can also choose a custom pipeline (see below) for the promotion if you don't want to use the default one. + +By clicking the *Variables* button you can override the chart values, import a specific shared configuration or add new values. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/value-options.png" +url="/images/deployments/helm/promotion/value-options.png" +alt="Changing deployment values" +caption="Changing deployment values" +max-width="40%" +%} + +By default Codefresh will use a built-in install/upgrade pipeline for performing the promotion. You can choose your own pipeline from the promotion dialog. That pipeline will be automatically provided with the following [environment variables]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/#overriding-the-default-helm-actions): + +* `CF_HELM_RELEASE` - name of release +* `CF_HELM_KUBE_CONTEXT` - `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_NAMESPACE` - Tiller Namespace if you use Helm 2 +* `CF_HELM_INSTALLATION_NAMESPACE` - namespace where release is promoted to +* `CF_HELM_CONTEXTS` - [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration) Helm contexts +* `CF_HELM_VALUES` - Helm chart values +* `CF_HELM_SET` - Additional values there were overriden +* `CF_HELM_CHART_JSON_GZIP` - Gzipped JSON of Helm chart (only for Helm 3) +* `CF_HELM_CHART_JSON` - JSON of Helm chart (only for Helm 2) +* `CF_HELM_BOARD` - Name of the board that is used for the drag-n-drop-action +* `CF_HELM_TARGET_SECTION` - Name of the Source Environment that you are promoting from +* `CF_HELM_SOURCE_SECTION` - Name of the Target Environment that you are promoting to + + +Note that the variable `CF_HELM_CHART_JSON_GZIP` is both compressed and base64 encoded. To get the raw value you need a command like `echo $CF_HELM_CHART_JSON_GZIP | base64 -d | gunzip` + +>Overriding the default pipeline can only be done by [Codefresh admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators). + +Once you click the *update* button, a new build will run that will perform the deployment. + +Note that you can move releases to any column both on the right and on the left of the current column. This is helpful if for example you find a bug in your production environment and you want to bring it back to a staging environment for debugging. + +### Promoting Helm releases programmatically + +You can also promote Helm releases with the [Codefresh CLI](https://codefresh-io.github.io/cli/predefined-pipelines/promote-helm-release/){:target="\_blank"}. 
+ +Once you have [installed the CLI](https://codefresh-io.github.io/cli/getting-started/){:target="\_blank"}, you can use it from an external script or terminal with the `helm-promotion` parameter: + +{% highlight shell %} +{% raw %} +codefresh helm-promotion --board MySampleBoard --source Staging --target Production --source-release my-app --set myenv=prod +{% endraw %} +{% endhighlight %} + +Here we promote the Helm release `my-app` to the *Production* column overriding also the `myenv` value. + +Remember that the Codefresh CLI can also run in a Codefresh pipeline with a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +Here is an example of a Helm promotion from within a Codefresh pipeline. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + triggerstep: + title: trigger + image: codefresh/cli + commands: + - 'codefresh helm-promotion --board MySampleBoard --source Staging --target Production --source-release my-app --namespace my-namespace --set myenv=prod' +{% endraw %} +{% endhighlight %} + +## Viewing the promotion pipeline + +When you promote a Helm Release for a Board, you can view the pipeline for that release. + +1. Click on Boards under the Helm section on the left-hand side +2. Select the board you want to view +3. Select the Builds tab on the top +4. Here, you can see the Promotion Pipelines / builds for promoting a Release + +## Editing your Helm Boards + +For any existing Helm board, you have the following options: + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/board-management.png" +url="/images/deployments/helm/promotion/board-management.png" +alt="Editing a Helm environment" +caption="Editing a Helm environment" +max-width="80%" +%} + + +1. The refresh button will update the board with the current state of the clusters +1. The filtering menu can be used to further constrain the Helm releases shown in each column. +1. The *edit properties* button allows you to change again the title of the board as well as a global filter for Helm releases +1. The *remove board* completely deletes the present board from the Codefresh UI +1. The environment details on the environment header are: +* The edit button to change again the options for this column (shown on mouse hover) +* The delete button to remove this column from the board (shown on mouse hover) +* The plus button to install a new chart. If you selected one or more charts when you defined your environment, only the selected charts are displayed. +* A numeric value that shows how many releases are contained on this environment +1. The delete button allows you to uninstall a Helm release for an environment + +The filtering options allow you to further constrain the Helm release shown for the whole board. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/promotion/filter.png" +url="/images/deployments/helm/promotion/filter.png" +alt="Filtering options" +caption="Filtering options" +max-width="50%" +%} + +The filters are especially helpful in Helm boards with large numbers of environments and/or releases. 
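+
+Before wrapping up, here is a minimal sketch of a custom promotion pipeline that could be selected in the promotion dialog described earlier. It only prints the promotion context and re-installs the release in the target namespace; the chart path is a placeholder and is not something Codefresh provides for you:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  show_promotion_context:
+    title: Showing promotion details
+    image: codefresh/cli
+    commands:
+      # the CF_HELM_* values are injected by the board when a release is dragged
+      - 'echo "Board ${{CF_HELM_BOARD}} - release ${{CF_HELM_RELEASE}} (source section ${{CF_HELM_SOURCE_SECTION}}, target section ${{CF_HELM_TARGET_SECTION}})"'
+      # Helm 3 boards only - decompress the chart definition if you need it
+      - echo $CF_HELM_CHART_JSON_GZIP | base64 -d | gunzip > chart.json
+  install_in_target:
+    title: Installing the release in the target environment
+    type: helm
+    arguments:
+      action: install
+      # placeholder - point this at your own chart or repository
+      chart_name: charts/my-app
+      release_name: ${{CF_HELM_RELEASE}}
+      kube_context: ${{CF_HELM_KUBE_CONTEXT}}
+      namespace: ${{CF_HELM_INSTALLATION_NAMESPACE}}
+{% endraw %}
+{% endhighlight %}
+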
+ +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Using external Helml repos in Codefresh pipelines]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#add-helm-repository) +[Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) +[Environment Dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/environment-dashboard/) diff --git a/_docs/deployments/helm/helm-releases-management.md b/_docs/deployments/helm/helm-releases-management.md new file mode 100644 index 00000000..991701f7 --- /dev/null +++ b/_docs/deployments/helm/helm-releases-management.md @@ -0,0 +1,263 @@ +--- +title: "Managing Helm releases" +description: "Manage Helm deployments from the Codefresh UI" +group: deployments +sub_group: helm +redirect_from: + - /docs/helm-releases-management/ + - /docs/deployments/helm/helm3/ +toc: true +--- +Codefresh has built-in integration for Helm that provides a unique view into your production Kubernetes cluster. +In Helm Releases, you can see the current status of your cluster, including the currently deployed releases, their previous revisions including change tracking, and even roll back to a previous release. + +Codefresh also offers [an environment view for Helm releases]({{site.baseurl}}/docs/deploy-to-kubernetes/environment-dashboard/) as well as [a promotion dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion). + + +## View Helm releases and release information + +View all the Helm releases in your cluster, and drill down into a specific release to see its services, deployed versions, manifests and more. + +> Make sure you have [connected your Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/adding-non-gke-kubernetes-cluster/) to Codefresh. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Releases**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/helm-release-dashboard.png" +url="/images/deployments/helm/dashboard/helm-release-dashboard.png" +alt="Helm Releases" +caption="Helm Releases" +max-width="90%" +%} + + + + +{:start="2"} +1. To see details for a specific release, click the release name. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/services.png" +url="/images/deployments/helm/dashboard/services.png" +alt="Kubernetes Services" +caption="Kubernetes Services" +max-width="70%" +%} + +The History tab shows all previous releases. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/history.png" +url="/images/deployments/helm/dashboard/history.png" +alt="Helm History" +caption="Helm History" +max-width="60%" +%} + +You can further expand a release revision to see exactly what files were changed in this release. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/diff.png" +url="/images/deployments/helm/dashboard/diff.png" +alt="Helm diff" +caption="Helm diff" +max-width="60%" +%} + +There are other tabs that show you the chart used, the values as well as the final manifests that were actually deployed. 
+ +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/manifests.png" +url="/images/deployments/helm/dashboard/manifests.png" +alt="Final rendered manifests" +caption="Final rendered manifests" +max-width="50%" +%} + +## Add labels to Kubernetes services + +For better visibility into services, add the [recommended labels](https://helm.sh/docs/topics/chart_best_practices/labels/){:target="\_blank"} to your Kubernetes service. + +{% highlight yaml %} +{% raw %} + apiVersion: v1 +kind: Service +metadata: + name: {{ template "fullname" . }} + labels: + app.kubernetes.io/name: "{{ template "name" . }}" + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" +{% endraw %} +{% endhighlight %} + +To use the instance label for something different, you can also use a release label instead: + +{% highlight yaml %} +{% raw %} +release: {{ .Release.Name }} +{% endraw %} +{% endhighlight %} + + + +## Add an upgrade message + +Codefresh allows you to display a meaningful description for each release in the release history. This message +can help show the main reason behind each release, or any other message that is convenient for you. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/helm-commit-message.png" +url="/images/deployments/helm/dashboard/helm-commit-message.png" +alt="Helm release message" +caption="Helm release message" +max-width="70%" +%} + +You can set this message for your Helm release in three ways: + +1. When you manually install a Helm release from the [Helm charts screen]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#install-chart-from-your-helm-repository), there is a field for this message. +1. Set the property `commit_message` inside the [notes.txt](https://helm.sh/docs/chart_template_guide/notes_files/){:target="\_blank"} file of your chart. +1. By providing an environment variable called `COMMIT_MESSAGE` within your [pipeline Helm step]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + + +## Roll back a Helm release + +You can rollback to a previous revision of a release in the History tab. + +1. Click the Helm release for which to perform a rollback, and then click the **History** tab. +1. To rollback to a specific release, click **Rollback** in the row. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/rollback.png" +url="/images/deployments/helm/dashboard/rollback.png" +alt="Rolling back to a previous release" +caption="Rolling back to a previous release" +max-width="50%" +%} + +>It takes time to complete rollback for a release, and the change in the cluster is not instantly updated in the Codefresh UI. If you also use a [custom rollback pipeline](#overriding-the-default-helm-actions), the delay between the cluster update and the UI refresh is even longer. + +## Helm UI actions + +From the main release screen, you have some additional actions. + +You can issue a [Helm test](https://github.com/kubernetes/helm/blob/master/docs/chart_tests.md) by clicking on the 'Run Test' button on the desired chart row. + +You can delete a release by clicking on the 'Delete' button on the desired chart row. 
+For deletion options, see the [helm delete documentation](https://github.com/kubernetes/helm/blob/master/docs/helm/helm_delete.md){:target="\_blank"}, for example, *purge* will remove the revision from the release history. + +## Helm deployment badge + +Similar to a [build badge]({{site.baseurl}}/docs/pipelines/build-status/#using-the-build-badge), you can also get a deployment badge for a Helm release. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Releases**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. +1. In the row with the Helm release for which to add a deployment badge, click the **Settings** (gear) icon. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/helm-badge.png" +url="/images/deployments/helm/dashboard/helm-badge.png" +alt="Helm Deployment badge" +caption="Helm Deployment badge" +max-width="60%" +%} + +{:start="3"} +1. To get deployment information, click **Badge**. + Codefresh provides the Markdown/HTML/Link segment that you can embed in README or other documents to show deployment information. + +## Overriding default Helm actions for releases + +By default, when you take an action in the UI, Codefresh executes the native Helm command corresponding to that action: + +* `helm test` for testing a chart +* `helm rollback` for rollbacks +* `helm delete` or `helm uninstall --keep-history` for delete +* `helm delete --purge ` or `helm uninstall ` for purging a release + +You can override these actions for a specific Helm release by defining custom pipelines for each action. This way you can add your extra logic on top of these actions. For example your own Helm uninstall pipeline might also have a notification step that posts a message to a Slack channel after a release is removed. + +>Only [Codefresh admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators) can override the default pipelines defined for a Helm release. + +1. In the Codefresh UI, from the DevOps Insights section in the sidebar, select [**Helm Releases**](https://g.codefresh.io/helm/releases/releasesNew/){:target="\_blank"}. +1. In the row with the Helm release for which to override default actions, click the **Settings** (gear) icon. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/dashboard/override-helm-actions.png" +url="/images/deployments/helm/dashboard/override-helm-actions.png" +alt="Changing default Helm actions" +caption="Changing default Helm actions" +max-width="50%" +%} + +{:start="3"} +1. Select the pipeline to use for the respective actions. + +### Environment variables for custom Helm commands +If you do override any of these actions, the following [environment variables]({{site.baseurl}}/docs/codefresh-yaml/variables/) are available in the respective pipeline, so that you can use your own custom Helm command. 
+ +**Helm Test pipeline** +* `CF_HELM_RELEASE`: Name of release +* `CF_HELM_KUBE_CONTEXT`: `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_NAMESPACE`: Namespace where release is stored +* `CF_HELM_TIMEOUT`: Time in seconds to wait for any individual Kubernetes operation +* `CF_HELM_CLEANUP`: Delete test pods upon completion + + + +**Helm Rollback pipeline** +* `CF_HELM_VERSION`: Helm version, ex.: 3.0.1, 2.7.0 +* `CF_HELM_RELEASE`: Name of release on cluster +* `CF_HELM_REVISION`: Revision to use for rollback +* `CF_HELM_KUBE_CONTEXT`: `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_NAMESPACE`: Namespace where release is stored + + +**Helm Delete pipeline** +* `CF_HELM_PURGE`: Boolean, delete release from store +* `CF_HELM_RELEASE`: Name of release +* `CF_HELM_TIMEOUT`: Time in seconds to wait for any individual Kubernetes operation +* `CF_HELM_HOOKS`: Prevent hooks from running during install +* `CF_HELM_KUBE_CONTEXT`: `kubectl` context name of target cluster (cluster name from [dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#work-with-your-services)) +* `CF_HELM_VERSION`: Helm version, ex.: 3.0.1, 2.7.0 +* `CF_HELM_NAMESPACE`: Namespace where release is stored + + +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Helm charts and repositories]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/) +[Codefresh-managed Helm Repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +[Helm promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) \ No newline at end of file diff --git a/_docs/deployments/helm/managed-helm-repository.md b/_docs/deployments/helm/managed-helm-repository.md new file mode 100644 index 00000000..5dcd43b5 --- /dev/null +++ b/_docs/deployments/helm/managed-helm-repository.md @@ -0,0 +1,137 @@ +--- +title: "Using a managed Helm repository" +description: "Use the Codefresh integrated Helm repository" +group: deployments +sub_group: helm +toc: true +--- + +Codefresh provides fully managed, hosted Helm repositories for users. +While we automatically create a default managed repo for every Codefresh account, you can also add [external Helm repositories]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/). + +The built-in Helm repo that Codefresh creates, is private by default, allowing access only via Codefresh or via a Codefresh API token. + +> Tip: + You may be familiar with the popular open source Helm repository implementation called 'ChartMuseum', that Codefresh sponsors. Codefresh-managed repositories are based on, and therefore compatible with, ChartMuseum and its unique features. For details, see [ChartMuseum](https://github.com/kubernetes-helm/chartmuseum){:target="\_blank"}. + +## View Helm repository integrations + +The Codefresh-managed Helm repo is displayed with other Helm repositories you have added to Helm integrations. + +>You cannot delete the built-in Helm repo that Codefresh creates for you. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon, and then from the sidebar, select **Pipeline Integrations**. +1. Scroll to **Helm Repositories**, and then click **Configure**. 
+ All the Helm integrations you set up are displayed. + +{% include +image.html +lightbox="true" +file="/images/deployments/helm/private-helm-repo/managed-helm-repo.png" +url="/images/deployments/helm/private-helm-repo/managed-helm-repo.png" +alt="Codefresh built-in Helm repository" +caption="Codefresh built-in Helm repository" +max-width="50%" +%} + + +## Get the chart repository URL +Get the chart repository URL for any Helm integration. +The URL is in the format: `cm://h.cfcr.io//`, where the default repo is `default`. + +* From the list of Helm integrations, select the integration and then click the **Edit** icon on the left. + The Helm Repository URL field displays the chart URL. + +## Codefresh Helm dashboards + +The Helm Charts and Helm Releases dashboards are automatically configured to work with your default managed repo to easily install charts and manage releases. +For more information, see [install chart from a Helm repository]({{site.baseurl}}/docs/deployments/helm/helm-charts-and-repositories/#install-chart-from-your-helm-repository) and [Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/). + +## Use Codefresh CLI for advanced Helm repo management + +The Codefresh CLI supports advanced management options for your managed repository, without having to log in to the Codefresh UI. +For more information on CLI support for Helm repos, see the [CLI documentation on Helm Repos](https://codefresh-io.github.io/cli/helm-repos/){:target="\_blank"}. + + +## Set access level for managed repo + +The managed Helm repository supports two modes of access: +* Private +* Public + +By default, the managed Helm repo is created with `Private` access, meaning that read/write access is protected by Codefresh authentication. + +You can switch the access level to `Public`, which will make the repository accessible to anonymous users only *for read operations*. Write operations, even in public access mode, always require authentication. +Be very careful when you make your repo public, as the whole world will be able to access your charts. We recommend this setting only for quick demos and POCs. + +**How to** + +* Use the Codefresh CLI to toggle access level on a managed repo: + +{% highlight bash %} +codefresh patch helm-repo mycfrepo -public +{% endhighlight %} + +For more info, see the relevant section in the [Codefresh CLI documentation](https://codefresh-io.github.io/cli/helm-repos/update-helm-repo/){:target="\_blank"}. + +## Working with Helm CLI + +The private Helm repository offered by Codefresh is a standard Helm repo and will work with the vanilla Helm executable even outside of the Codefresh UI. +We suggest using the private [Helm repo from Codefresh pipelines]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/), but you can also use it from your workstation. + +### Add a Public repo to Helm + +If your repo is set to `public` access mode, you can use it just like any other HTTP Helm repository. +You can: + +{% highlight bash %} +helm repo add mycfrepo https://h.cfcr.io// +{% endhighlight %} + +### Add a Private repo to Helm + +If your repo is set to `private` access mode, the default, then the Helm client needs to authenticate with Codefresh. +To authenticate, you can use ChartMuseum's 'Helm Push' CLI plugin which adds support for authentication and chart manipulation on top of the basic Helm CLI functionality. 
+ +We highly recommend that you familiarize yourself with the [Helm Push plugin](https://github.com/chartmuseum/helm-push){:target="\_blank"}. + +#### Install the Helm Push plugin + +{% highlight bash %} +helm plugin install https://github.com/chartmuseum/helm-push +{% endhighlight %} + +#### Configure the Helm Push plugin + +If you have the Codefresh CLI installed and configured, there's nothing you need to do. The Helm Push plugin picks up your settings automatically. +To learn about getting started with Codefresh CLI, see [CLI getting started](https://codefresh-io.github.io/cli/getting-started/). +To learn about manual authentication without depending on the Codefresh CLI, see [here](https://github.com/chartmuseum/helm-push#token). + +#### Add the private repo + +{% highlight bash %} +helm repo add mycfrepo cm://h.cfcr.io/kostis-codefresh/default +{% endhighlight %} + +Notice the protocol is `cm://` instead of `https://`. This indicates the custom authentication scheme supported by ChartMuseum Helm Push plugin. + +## Using in a Codefresh pipeline + +The Codefresh Helm plugin automatically handles authentication for managed repositories. You can use the plugin as you usually would. For more information, see the [Codefresh Helm plugin]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/). + +## Removing a Helm chart from a private Codefresh repository + +You can delete a Helm chart from your own Helm repository with the following HTTP call. + +{% highlight bash %} +curl -X DELETE -v -H "Authorization: Bearer " https://h.cfcr.io/api///charts// +{% endhighlight %} + +Replace values in `<>` with your own (also removing `<>` in the process). + +Generate an api key from [https://g.codefresh.io/user/settings](https://g.codefresh.io/user/settings) as explained in the [API page]({{site.baseurl}}/docs/integrations/codefresh-api/). + +## Related articles +[Using Helm in a Codefresh pipeline]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/) +[Helm integration]({{site.baseurl}}/docs/integrations/helm/) +[Managing Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management) diff --git a/_docs/deployments/helm/using-helm-in-codefresh-pipeline.md b/_docs/deployments/helm/using-helm-in-codefresh-pipeline.md new file mode 100644 index 00000000..f96ddd4f --- /dev/null +++ b/_docs/deployments/helm/using-helm-in-codefresh-pipeline.md @@ -0,0 +1,346 @@ +--- +title: "Using Helm in a Codefresh pipeline" +description: "Deploy and push Helm charts with Codefresh" +group: deployments +sub_group: helm +redirect_from: + - /docs/deployments/helm/create-helm-artifacts-using-codefresh-pipeline/ + - /docs/install-helm-chart-using-codefresh-pipeline/ +toc: true +--- + +We created a [special Helm step](https://codefresh.io/steps/step/helm){:target="\_blank"} for easy integration of Helm in Codefresh pipelines. The Helm step facilitates authentication, configuration, and execution of Helm commands. + +> If you have a special use case that is not covered by the Codefresh Helm step, you can always use the regular `helm` cli in a freestyle step. + In this case, you can use the simpler container `codefresh/kube-helm` which includes only Kubectl and helm tools. `kube-helm` is available on DockerHub: [https://hub.docker.com/r/codefresh/kube-helm/](https://hub.docker.com/r/codefresh/kube-helm/){:target="\_blank"}. + +If you are just starting with Helm, refer to our [Helm quick start guide]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/). 
And, if you prefer to work directly with code, see our [full Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/). + +## Helm setup + + + +To use Helm in your Codefresh pipeline you must do the following: + +1. Make sure that your application has a [Helm chart](https://helm.sh/docs/chart_template_guide/getting_started/) +1. Create a Helm package for your application from the chart +1. [Add a Kubernetes cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/) in Codefresh +1. Define a Helm repository or use the [one offered by Codefresh to all accounts]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +1. Import the Helm [configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) into your pipeline variables +1. Use the Helm step in your [yml build definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) + +Let's see these steps in order. + +### Step 1: Create a Helm chart for your application + +Helm applications are bundled in special archives called *Charts*. You can create a Helm +chart for your application by following [the official documentation on charts](https://helm.sh/docs/chart_template_guide/getting_started/){:target="\_blank"}. + +The example Codefresh application includes a [sample chart](https://github.com/codefresh-contrib/python-flask-sample-app/tree/with-helm/charts/python){:target="\_blank"}, used in our Helm quick start guide, mentioned earlier in this article. + +You can create the chart manually or by using the [helm create](https://helm.sh/docs/helm/#helm-create){:target="\_blank"} command on your workstation. There are also several third-party tools that can create Helm packages for you such as [Draft](https://draft.sh/){:target="\_blank"}. + +Once your Helm chart is ready, commit it to a folder called `charts`, in the same Git repository that contains the source code of your application. Codefresh can also work with Helm charts that are in different Git repositories. We suggest however that you keep both the source code and the Helm chart of an application in the same Git repository to make chart management much easier. + + +### Step 2: Select Kubernetes cluster for deployment + +The Helm pipeline step requires the configuration of a `kube_context` variable that determines the Kubernetes cluster used for the deployment. + +1. Connect your Kubernetes cluster with Codefresh, as described [here]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/). + +1. Provide the cluster to the Helm step by adding the `KUBE_CONTEXT` variable, where the value is the connection *name* entered when you created the connection. +> The connection name also appears as the title of the cluster in Kubernetes integration settings (Account Settings >Integrations > Kubernetes). + +{% include image.html +lightbox="true" +file="/images/deployments/helm/k8s-name.png" +url="/images/deployments/helm/k8s-name.png" +alt="Name of Kubernetes cluster" +caption="Name of Kubernetes cluster" +max-width="70%" +%} + +1. Verify that your cluster is set up for Helm, from the sidebar, below DevOps Insights, select **Helm Releases**. + The [Helm releases]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) in your cluster are displayed. If you have just started using Helm, the release page may be empty. + +### Step 3: Define a Helm repository + +To push your chart to a Helm repository, configure the target repository to work with. 
+Always a good practice to save Helm charts in Helm repositories, Codefresh supports a variety of private, authenticated Helm repositories +in addition to public HTTP repositories. Codefresh also provides a free, managed Helm repository for every account. + +* Either [connect your repository with Codefresh]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) +OR +* Obtain your [managed Helm repository URL]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/#chart-repository-url) + + +### Step 4: (Optional) Import the Helm configuration into your pipeline definition + +Once you have a connected to a Helm repository, attach it to the pipeline. + +1. Frpm the Pipelines page, select the pipeline into which to import the Helm configuation. +1. In the Workflows tab, do one of the following: + * Click **Variables** on the right, and then click the Settings (gear) icon in the variables section on the right. + * Click the context menu next to the settings icon. +1. Click on **Import from/Add shared configuration**, and from the list, select `CF_HELM_DEFAULT`. See [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + +{% include image.html +lightbox="true" +file="/images/deployments/helm/import-helm-configuration.png" +url="/images/deployments/helm/import-helm-configuration.png" +alt="Connecting a Helm repository in the pipeline" +caption="Connecting a Helm repository in the pipeline" +max-width="50%" +%} + + +### Step 5: Use the Helm freestyle step in the pipeline + +You can now use the Helm freestyle step in the `codefresh.yml` file. This step is only needed in pipelines that actually upload/fetch Helm charts to/from Helm repositories. If your pipeline directly installs a Helm chart from the Git filesystem, there is no need to import a Helm configuration. + +>Currently, you can use only one Helm configuration in the same pipeline. We are aware +of this limitation and will soon improve the way Codefresh works with multiple Helm configurations. + + + +* Use the Helm typed step from the [Step Marketplace](https://codefresh.io/steps/step/helm){:target="\_blank"}. +* Configure the Helm step using environment variables, as described [here]({{site.baseurl}}/docs/codefresh-yaml/variables/#user-provided-variables). + +The example below illustrates how to provide variables as part of the Helm step definition: + +```yaml +deploy: + type: helm + arguments: + action: install + chart_name: test_chart + release_name: first + helm_version: 3.0.3 + kube_context: my-kubernetes-context + custom_values: + - 'pat.arr="{one,two,three}"' + - 'STR_WITH_COMAS="one\,two\,three"' +``` + + + +#### Helm step action modes + +The Helm step can operate in one of three modes, as defined by the `action` field: + +1. `install`: Installs the chart into a Kubernetes cluster. This is the default mode if not explicitly set. +2. `push`: Packages the chart and pushes it to the repository. +3. `auth`: Authenticate only. Only sets up authentication and adds the repo to the Helm. This mode is useful to write your own Helm commands using the freestyle step's `commands` property, but still allow the step to handle authentication. + + +#### Helm values + +* To supply a value file, add to the Helm step, `custom_values_file`, with the value pointing to an existing values file. +* To override specific values, add to the Helm step, `custom_values` followed by the path to the value to set. For example, `myservice_imageTag`. Note that `.` (dot) should be replaced with `_` (underscore). 
The value of the variable is used to override or set the templated property. + +Examples: +```yaml +... + custom_values: + - 'myimage_pullPolicy=Always' +... +``` +results in: +`--set myimage.pullPolicy=Always` + +```yaml +... + custom_value_files: + - 'values-prod.yaml' +... +``` +results in: +`--values values-prod.yaml` + +If a variable already contains a `_` (underscore) in its name, replace it with `__` (double underscore). + +## Helm usage examples + +The following sections illustrate all three modes of Helm usage. + +You can also look at the [GitHub repository](https://github.com/codefresh-contrib/helm-sample-app){:target="\_blank"} of [our Helm example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/) for full pipelines: + +* Pipeline YAML for [deploying a chart](https://github.com/codefresh-contrib/helm-sample-app/blob/master/codefresh-do-not-store.yml){:target="\_blank"} +* Pipeline YAML for [both storing and deploying a chart](https://github.com/codefresh-contrib/helm-sample-app/blob/master/codefresh.yml){:target="\_blank"} + +### Helm usage example: Installing a Helm Chart + +The following example includes the minimum configuration to install a Helm chart from a repository. For more configuration options, see the [Arguments reference](https://codefresh.io/steps/step/helm){:target="\_blank"}. + +```yaml +deploy: + type: helm + arguments: + action: install + chart_name: path/to/charts + release_name: first + helm_version: 3.0.3 + kube_context: my-kubernetes-context +``` + +### Helm usage example: Pushing a Helm Chart + +The following example illustrates how to package and push a Helm chart into a repository. + +```yaml +deploy: + type: helm + arguments: + action: push + chart_name: /codefresh/volume/repo/chart + chart_repo_url: 'cm://h.cfcr.io/useraccount/default' +``` + +> **Notes**: + - Assumes that a Git repository with the Helm chart files was cloned as a part of the pipeline. + - The Git repository contains the chart files in the `chart` directory. + - `chart_repo_url` is optional. If a [Helm repository configuration](#step-4-optional-import-the-helm-configuration-in-your-pipeline-definition) is attached to the pipeline, this setting is ignored. + +### Helm usage example: Authenticating only + +The following example illustrates the Helm mode for authentication only. + +```yaml +deploy: + type: helm + arguments: + action: auth + kube_context: my-kubernetes-context + commands: + - helm list +``` + +### Helm usage example: Custom Helm commands + +The following example illustrates executing custom Helm commands. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +my_custom_helm_command: + type: helm + arguments: + action: auth + kube_context: my-kubernetes-context + commands: + - source /opt/bin/release_chart + - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ + - helm repo add stable https://kubernetes-charts.storage.googleapis.com + - helm repo list + - helm repo update + - helm list +{% endraw %} +{% endhighlight %} + +> Notes: +- The directory that contains a chart MUST have the same name as the chart. Thus, a chart named `my-chart` MUST be created in a directory called `my-chart/`. This is a requirement of the [Helm Chart format](https://helm.sh/docs/chart_template_guide/). 
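+
+As a variation on the custom-commands pattern above, the `auth` action can also be combined with your own `helm upgrade` command. The sketch below is illustrative only: the release name and cluster name are placeholders, and the chart is assumed to be committed under `./charts/my-app`, with the directory named after the chart as required by the note above.
+
+```yaml
+my_custom_helm_upgrade:
+  type: helm
+  arguments:
+    action: auth
+    # placeholder - use the name of your own cluster integration
+    kube_context: my-kubernetes-context
+    commands:
+      # placeholder release name and chart path
+      - helm upgrade --install my-release ./charts/my-app --wait
+```
+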
+ +## Helm configuration fields + +Name|Required|Description +---|---|--- +action|Defaults to 'install'|Operation mode: `install`/`push`/`auth` +chart_name|required for install/push|Chart reference to use, adhering to Helm's lookup rules (path to chart folder, or name of packaged chart). There's no need to prefix with `/reponame` if referencing a chart in a repository, this is handled automatically. a.k.a `CHART_NAME` but `CHART_NAME` shouldn't be used anymore. +chart_repo_url|optional|Helm chart repository URL. If a [Helm repository configuration](#step-4-optional---import-the-helm-configuration-in-your-pipeline-definition) is attached to the pipeline, this setting is ignored. +chart_version|optional|Override or set the chart version. +cmd_ps|optional|When defined, Command Postscript is appended as is to the generated Helm command string. Can be used to set additional parameters supported by the command but not exposed as configuration options.| +commands|optional|Commands to execute in plugin after `auth` action. +custom_value_files|optional|Values file to provide to Helm as `--values` or `-f`.| +custom_values|optional|Values to provide to Helm as `--set` +helm_version|optional|Version of [cfstep-helm image](https://hub.docker.com/r/codefresh/cfstep-helm/tags){:target="\_blank"} +kube_context|required for install|Kubernetes context to use. The name of the cluster as [configured in Codefresh]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/). +namespace|optional|Target Kubernetes namespace to deploy to. +release_name|required for install|Helm release name. If the release exists, it is upgraded. +repos|optional|Array of custom repositories. + + +## Full Helm pipeline example + +The pipeline in this example builds a docker image, runs unit tests, stores the Helm chart in the Codefresh private Helm repository and finally deploys the Helm chart to a cluster. + +{% include image.html +lightbox="true" +file="/images/deployments/helm/full-helm-pipeline.png" +url="/images/deployments/helm/full-helm-pipeline.png" +alt="Helm pipeline" +caption="Helm pipeline" +max-width="90%" +%} + +This is the pipeline definition: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - build + - test +steps: + clone: + title: Cloning main repository... 
+ stage: checkout + type: git-clone + arguments: + repo: 'codefresh-contrib/python-flask-sample-app' + revision: with-helm + git: github + MyAppDockerImage: + title: Building Docker Image + stage: build + type: build + working_directory: '${{clone}}' + arguments: + image_name: kostis-codefresh/python-flask-sample-app + tag: 'master' + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + stage: test + type: freestyle + working_directory: '${{clone}}' + arguments: + image: ${{MyAppDockerImage}} + commands: + - python setup.py test + StoreChart: + title: Storing Helm Chart + type: helm + stage: store + working_directory: ./python-flask-sample-app + arguments: + action: push + chart_name: charts/python + kube_context: kostis-demo@FirstKubernetes + DeployMyChart: + type: helm + stage: deploy + working_directory: ./python-flask-sample-app + arguments: + action: install + chart_name: charts/python + release_name: my-python-chart + helm_version: 3.0.2 + kube_context: kostis-demo@FirstKubernetes + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'image_tag=master' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} + +You can see the source code in our [example section]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/). + + +## Related articles +[Helm Charts and repositories]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) +[Using managed Helm repositories]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) +[Helm Promotion boards]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion) diff --git a/_docs/deployments/kubernetes/custom-kubectl-commands.md b/_docs/deployments/kubernetes/custom-kubectl-commands.md new file mode 100644 index 00000000..77bc7411 --- /dev/null +++ b/_docs/deployments/kubernetes/custom-kubectl-commands.md @@ -0,0 +1,184 @@ +--- +title: "Custom kubectl commands" +description: "Use kubectl in your Codefresh pipelines" +group: deployments +sub_group: kubernetes +toc: true +--- + +As explained in [Kubernetes deployment options]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/), Codefresh has built-in functionality for deploying to Kubernetes clusters. + +For maximum flexibility with cluster deployments, you can run your own custom `kubectl` commands in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +[Kubectl](https://kubernetes.io/docs/reference/kubectl/overview/){:target="\_blank"} is the command line interface for managing kubernetes clusters. + +Codefresh automatically sets up your [config context](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/){:target="\_blank"} with your connected clusters. + +The config context is automatically placed for you at the path of the [variable]({{site.baseurl}}/docs/pipelines/variables/) `$CF_KUBECONFIG_PATH`. +In the current Codefresh implementation, this expands to `/codefresh/volume/sensitive/.kube/config`, within the [shared step volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). + +When you use custom `kubectl` commands, it is your responsibility to template your manifests using any of the available options. To employ Codefresh for templating, it is better to use the dedicated [cf-deploy-kubernetes step]({{site.baseurl}}/docs/deployments/ci-cd-guides/kubernetes-templating/), which provides simple templating capabilities. 
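+
+If you do template manifests yourself, one possible approach (shown here purely as a sketch) is to substitute placeholders with `sed` before applying the result. Any image that provides both `kubectl` and `sed` will do; here we reuse the Codefresh kubectl image described in the next section. The `deployment.template.yml` file, its `IMAGE_TAG` placeholder, and the cluster name `my-cluster-name` are all assumptions for illustration:
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  RenderAndDeploy:
+    title: Templating manifest and deploying
+    image: codefresh/kubectl
+    commands:
+      # replace the IMAGE_TAG placeholder with the current Git revision
+      - sed "s/IMAGE_TAG/${{CF_SHORT_REVISION}}/g" deployment.template.yml > deployment.yml
+      - kubectl config use-context "my-cluster-name"
+      - kubectl apply -f deployment.yml
+{% endraw %}
+{% endhighlight %}
+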
+ +## Using the Codefresh kubectl image + +Codefresh already offers a public Docker image with `kubectl` at [https://hub.docker.com/r/codefresh/kubectl/tags](https://hub.docker.com/r/codefresh/kubectl/tags){:target="\_blank"}. You can choose a specific version of `kubectl` with the appropriate tag or just select `latest` for the most up-to-date version. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl:1.13.3 + commands: + - echo $CF_KUBECONFIG_PATH + - kubectl help +{% endraw %} +{% endhighlight %} + +If you run the pipeline, you can see the help options for `kubectl`. + +## Getting a config context + +The important thing to know when running custom `kubectl` commands is that Codefresh automatically sets up +your [kubeconfig files](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/){:target="\_blank"} for you with the cluster information present in [integrations]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster). + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/kube-context.png" +url="/images/deployments/kubernetes/kube-context.png" +alt="Codefresh cluster names" +caption="Codefresh cluster names" +max-width="50%" +%} + +If you run this pipeline, you will see the names of all your connected clusters: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl + commands: + - kubectl config get-contexts +{% endraw %} +{% endhighlight %} + +With two sample clusters, the output of this pipeline is the following: + +``` +Running freestyle step: Running Kubectl +Pulling image codefresh/kubectl:latest +Status: Image is up to date for codefresh/kubectl:latest +NAME CLUSTER AUTHINFO NAMESPACE +gke-kostisdemo-codefresh-kostis gke-kostisdemo-codefresh-kostis gke-kostisdemo-codefresh-kostis default +kostis-demo@FirstKubernetes kostis-demo@FirstKubernetes kostis-demo@FirstKubernetes default + +``` + +You can modify the current config context and run any `kubectl` command you want applied to that context. The next pipeline will print all the nodes of the first cluster: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyCustomKubectlCommands: + title: Running Kubectl + image: codefresh/kubectl + commands: + - kubectl config get-contexts + - kubectl config use-context "gke-kostisdemo-codefresh-kostis" + - kubectl get nodes +{% endraw %} +{% endhighlight %} + +## Example of parallel deployment with kubectl + +Let's see a full example. In this pipeline, we will create two Docker images and deploy them on two separate clusters, using custom `kubectl` commands. We will also use the [parallel capability]({{site.baseurl}}/docs/pipelines/advanced-workflows/) of Codefresh pipelines. 
+ +Here is the pipeline: + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/parallel-kubectl.png" +url="/images/deployments/kubernetes/parallel-kubectl.png" +alt="Parallel kubectl deployment" +caption="Parallel kubectl deployment" +max-width="100%" +%} + +And here is the complete `codefresh.yml`: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' + +stages: +- build +- deploy + +steps: + BuildingApps: + type: parallel + stage: 'build' + steps: + BuildingApp1: + title: Building App 1 + type: build + stage: build + image_name: nestjs-app + working_directory: ./my-nestjs-project/ + dockerfile: Dockerfile + BuildingApp2: + title: Building App 2 + type: build + stage: build + image_name: rails + working_directory: ./my-rails-project/ + dockerfile: Dockerfile + DeployingApps: + type: parallel + stage: 'deploy' + steps: + DeployApp1: + title: Deploying App 1 + stage: deploy + image: codefresh/kubectl + working_directory: ./my-nestjs-project/ + commands: + - kubectl config get-contexts + - kubectl config use-context "gke-kostisdemo-codefresh-kostis" + - kubectl apply -f service.yml deployment.yml + DeployApp2: + title: Deploying App 2 + stage: deploy + image: codefresh/kubectl + working_directory: ./my-rails-project/ + commands: + - kubectl config get-contexts + - kubectl config use-context "kostis-demo@FirstKubernetes" + - kubectl apply -f service.yml deployment.yml configmap.yml +{% endraw %} +{% endhighlight %} + +In the example above, we select one of the clusters in each deployment step, and then apply several Kubernetes manifests that constitute an application. + +## Related articles +[Managing your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Accessing a Docker registry]({{site.baseurl}}/docs/ci-cd-guides/access-docker-registry-from-kubernetes/) + + + + + + + + + + + \ No newline at end of file diff --git a/_docs/deployments/kubernetes/deployment-options-to-kubernetes.md b/_docs/deployments/kubernetes/deployment-options-to-kubernetes.md new file mode 100644 index 00000000..ed56a0b1 --- /dev/null +++ b/_docs/deployments/kubernetes/deployment-options-to-kubernetes.md @@ -0,0 +1,141 @@ +--- +title: "Deployment options for Kubernetes" +description: "Deploy to Kubernetes with the declarative deploy step" +group: deployments +sub_group: kubernetes +redirect_from: + - /docs/deploy-to-kubernetes/ + - /docs/deployment-to-kubernetes-quick-start-guide/ + - /docs/deploy-to-kubernetes/deployment-to-kubernetes-quick-start-guide/ + - /docs/deploy-to-kubernetes/get-ready-to-deploy/ +toc: true +--- + +Codefresh offers several options when it comes to Kubernetes deployments: + +1. Codefresh UI for on demand deployments + This is the easiest deployment option for Kubernetes. See our [Kubernetes quick start guide]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/). +1. Through a dedicated [deploy step]({{site.baseurl}}/docs/pipelines/steps/deploy/) in a pipeline + Described in this article. +1. Through the [cf-deploy-kubernetes step]({{site.baseurl}}/docs/ci-cd-guides/kubernetes-templating/) in a pipeline + Use this to also perform simple templating on Kubernetes manifests. +1. Through a [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) step with [Kustomize](https://kustomize.io){:target="\_blank"}. + See [Deployment with Kustomize]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-with-kustomize). +1. 
Using a [freestyle]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) step with your own `kubectl` commands + This deployment option gives you great flexibility, but assumes that you know how to work with `kubectl`. See [Custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/). +1. Using Helm as a package manager + See our [Helm quick start guide]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/). + +## Prerequisites + +* A K8s cluster in Codefresh (see [Connecting a Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) +* Familiarity with the [Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/), basic [pipeline steps ]({{site.baseurl}}/docs/pipelines/steps/), and how to describe them +* [Integrate your Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/) with Codefresh + +## Build and push your image +Here is a basic Codefresh pipeline scenario to build and push your image to Dockerhub registry. + + `YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildImage: + type: build + image_name: '/' #specify your future image reference here + dockerfile: Dockerfile + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + + PushToDockerRegistry: + type: push + candidate: '${{BuildImage}}' + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + registry: 'dockerhub' #the name of the registry you added to Codefresh +{% endraw %} +{% endhighlight %} + +Using this YAML example, we'll add an additional step to deploy the image in Dockerhub to Kubernetes. + +## Describe your deployment +The following instructions describe how to create a new service in your Kubernetes cluster in order to deploy to it. +>If you're deploying to an existing service in your Kubernetes cluster, please skip to the [next step]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/#add-a-deployment-step). + + + 1. Go to the **`Kubernetes` → `Services page`**. + 1. Click the button **“Add Service”**. + 1. Select the **cluster**. + 1. Select the **namespace**. + 1. Type an arbitrary **service name**. + 1. Specify the **number of replicas**. + 1. Type the name of your **pushed image**. + 1. In the **“Internal Ports”** field specify the port which your application listens to. + 1. In the **“Expose port”** field specify the port to be exposed to the Internet and check the checkbox. + 1. Click the button **“Deploy”** to deploy the application. + +Wait until the deployment is completed, and you can open the deployed application in your browser by clicking on the "endpoint" link. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/describe-k8s-deployment.png" +url="/images/deployments/kubernetes/describe-k8s-deployment.png" +alt="Describe Kubernetes deployment" +caption="Describe Kubernetes deployment" +max-width="60%" +%} + +## Add a Deployment step +So now you have deployed your image manually, which is great. But how to trigger the deployment within your pipeline? 
For that you will need to add a step of a “Deploy” type to the Codefresh YAML manifest file: + + `YAML` +{% highlight yaml %} +{% raw %} +RunningDeployScript: + title: Running Deploy Script + type: deploy + kind: kubernetes + cluster: '' #the name specified when you added the cluster + namespace: #the namespace you wish to deploy into + service: #the service you would like to update the deployment in + candidate: + image: '${{BuildImage}}' + registry: 'dockerhub' +{% endraw %} +{% endhighlight %} + +The full Codefresh YAML looks like this: + + `YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildImage: + type: build + image_name: '/' + dockerfile: Dockerfile + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + + PushToDockerRegistry: + type: push + candidate: '${{BuildImage}}' + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + registry: 'dockerhub' #the name of the registry you added to Codefresh + + RunningDeployScript: + title: Running Deploy Script + type: deploy + kind: kubernetes + cluster: '' #the name specified when you added the cluster + namespace: #the namespace you wish to deploy into + service: #the service you would like to update the deployment in + candidate: + image: '${{BuildImage}}' + registry: 'dockerhub' +{% endraw %} +{% endhighlight %} + +You can now run the whole pipeline that builds your application from source to a docker image, pushes it to a docker registry and deploys it to your Kubernetes cluster. + +## Related articles +[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) +[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) diff --git a/_docs/deployments/kubernetes/manage-kubernetes.md b/_docs/deployments/kubernetes/manage-kubernetes.md new file mode 100644 index 00000000..986fb08d --- /dev/null +++ b/_docs/deployments/kubernetes/manage-kubernetes.md @@ -0,0 +1,169 @@ +--- +title: "Managing Kubernetes clusters" +description: "Use the graphical Kubernetes dashboard in Codefresh" +group: deployments +sub_group: kubernetes +redirect_from: + - /docs/deploy-to-kubernetes/codefresh-kubernetes-integration-beta/ + - /docs/codefresh-kubernetes-integration-beta/ +toc: true +--- + +Codefresh includes a built-in Kubernetes Dashboard that allows you to see the state of your clusters, and even make changes if you have the appropriate access privileges. + +## Accessing the Kubernetes Dashboard + +After [adding a cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster), you will be able to manage your Kubernetes assets via the *Kubernetes tab* on the left pane. Clicking on the Kubernetes icon will take you to your services dashboard. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/kubernetes-dashboard.png" +url="/images/deployments/kubernetes/kubernetes-dashboard.png" +alt="Codefresh Kubernetes Dashboard" +caption="Codefresh Kubernetes Dashboard" +max-width="80%" + %} + +With the graphical dashboard it is very easy to locate problematic services or deploy new ones quickly. If there are clusters that are not accessible to your user you can hide them by enabling the *Hide inaccessible clusters* option at the top right of the window in order to simplify the view. + +## Viewing your Kubernetes services + +If you have too many clusters you can choose the *add filter* button at the top of the window to hide specific clusters or namespaces. 
+
+You will be able to see the following parameters for each service:
+* Name
+* Cluster
+* Namespace
+* Replica count
+* Docker image
+* Selector
+* A status check
+
+You can also switch to a Grid view if you prefer that over the default List view:
+
+{% include image.html
+lightbox="true"
+file="/images/kubernetes/dashboard/grid-view.png"
+url="/images/kubernetes/dashboard/grid-view.png"
+alt="Kubernetes Dashboard grid view"
+caption="Kubernetes Dashboard grid view"
+max-width="80%"
+%}
+
+## Work with your services
+
+In this view, you will be able to perform the following actions:
+
+* Add a new service
+* Edit/update existing services
+* Remove a service
+
+## Deploying a new service
+
+The Kubernetes dashboard provides a GUI dialog to quickly deploy new services in your cluster.
+
+### Choose a Docker image
+
+To add a service, click the "Add Service" button at the top, or the "plus" button on a specific namespace. Then fill in the details for your new service.
+
+You can add images built in Codefresh that were pushed to the Codefresh registry, or provide the name of a Docker image that will be pulled from an [external Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/). Note that images that are not from Docker Hub must be referenced with their full domain name.
+
+{% include image.html
+lightbox="true"
+file="/images/deployments/kubernetes/quick-ui-deploy.png"
+url="/images/deployments/kubernetes/quick-ui-deploy.png"
+alt="Deploying with the quick UI dialog"
+caption="Deploying with the quick UI dialog"
+max-width="60%"
+%}
+
+Use the following steps to add an image and pull secret from the [connected Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/):
+* Specify the image name in the format `<registry>/<account>/<image>:<tag>`
+* Provide an image pull secret - this is done for each namespace
+
+{% include image.html
+lightbox="true"
+file="/images/deployments/kubernetes/deploying-private-cf-registry.png"
+url="/images/deployments/kubernetes/deploying-private-cf-registry.png"
+alt="Deploying from the private Codefresh registry"
+caption="Deploying from the private Codefresh registry"
+max-width="60%"
+%}
+
+From this screen you can also [create Kubernetes image secrets]({{site.baseurl}}/docs/ci-cd-guides/access-docker-registry-from-kubernetes/) without actually deploying anything.
+
+### Set environment variables and resources
+
+You can add extra environment variables that will be passed to the deployment image.
+
+{% include image.html
+lightbox="true"
+file="/images/deployments/kubernetes/environment-variables-deployment.png"
+url="/images/deployments/kubernetes/environment-variables-deployment.png"
+alt="Environment variables for the deployment"
+caption="Environment variables for the deployment"
+max-width="60%"
+%}
+
+You can also define resource limits for your pods.
+It is a good practice to set maximum limits so that your services do not experience resource starvation.
+
+### Adding a service with a manifest file
+
+If you are an advanced Kubernetes user, toggle the Deployment option button to the `YAML` position at the top right corner of the screen.
+In this mode you can define exactly the contents of the service and deployment Kubernetes resources.
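+
+For reference, the kind of content you would paste in this mode is a standard Kubernetes Service plus Deployment pair. The manifest below is only an illustrative sketch with assumed names, image reference, and ports; replace them with your own values. The two screenshots that follow show where such content is entered in the UI.
+
+`YAML`
+{% highlight yaml %}
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-demo-service
+spec:
+  selector:
+    app: my-demo-app
+  ports:
+    - port: 80
+      targetPort: 8080
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: my-demo-app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: my-demo-app
+  template:
+    metadata:
+      labels:
+        app: my-demo-app
+    spec:
+      containers:
+        - name: my-demo-app
+          # Example image reference; use your own registry/account/image:tag
+          image: r.cfcr.io/my-account/my-demo-app:latest
+          ports:
+            - containerPort: 8080
+{% endhighlight %}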
+ +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/define-k8s-service-resource.png" +url="/images/deployments/kubernetes/define-k8s-service-resource.png" +alt="Define a Kubernetes Service Resource" +caption="Define a Kubernetes Service Resource" +max-width="60%" +%} + +You can type directly in the browser window or paste content from a text editor. + +{% include image.html +lightbox="true" +file="/images/deployments/kubernetes/define-k8s-deployment-resource.png" +url="/images/deployments/kubernetes/define-k8s-deployment-resource.png" +alt="Define a Kubernetes Deployment Resource" +caption="Define a Kubernetes Deployment Resource" +max-width="60%" +%} + + +Congratulations! Your service is now deployed to your Kubernetes cluster. + +You can update an existing service in a similar manner from your Kubernetes services window - Just hit the "edit" icon and update your service using the same steps as in "Add new service" section. + +## Automate your deployment + +After your service is deployed to your Kubernetes cluster, you can automate image deployment using Codefresh pipelines. + +Some of the possible options are: + +1. The dedicated [deploy step]({{site.baseurl}}/docs/pipelines/steps/deploy/) in a pipeline. +1. The [cf-deploy-kubernetes step]({{site.baseurl}}/docs/ci-cd-guides/kubernetes-templating/) in a pipeline. This can also perform simple templating on Kubernetes manifests. + +See more choices in the [Deployment options page]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes/). + +## Related articles +[Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) +[Add Config Maps]({{site.baseurl}}/docs/ci-cd-guides/add-config-maps-to-your-namespaces/) +[Kubernetes deployment quick start]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) + + + diff --git a/_docs/example-catalog/cd-examples/amazon-ecs.md b/_docs/example-catalog/cd-examples/amazon-ecs.md new file mode 100644 index 00000000..89905c22 --- /dev/null +++ b/_docs/example-catalog/cd-examples/amazon-ecs.md @@ -0,0 +1,155 @@ +--- +title: "Amazon ECS/Fargate" +description: "Use Codefresh to deploy Docker containers to ECS/Fargate" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/amazon-ecs/ + - /docs/deploy-your-containers/ + - /docs/deploy-your-containers/amazon-ecs/ +toc: true +--- +Codefresh can deploy to any ECS or Fargate cluster created in Amazon. + +{% include image.html +lightbox="true" +file="/images/examples/amazon-ecs/ecs-pipeline-deployment.png" +url="/images/examples/amazon-ecs/ecs-pipeline-deployment.png" +alt="Deploying to Amazon ECS" +caption="Deploying to Amazon ECS" +max-width="100%" +%} + +## Prerequisites + + +1. Configure an ECS (or Fargate) Cluster with at least one running instance. +1. Configure an ECS Service and Task Definition with a reference to **the image that you are going to build and push.** See [the official amazon docs](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html) for more details. +1. Connect your [ECR to Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/amazon-ec2-container-registry/) so that it can be used by name in Codefresh pipelines. +1. 
Verify you have AWS Credentials (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`), with the following privileges: + + `JSON` +{% highlight json %} +{% raw %} +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Stmt1479146904000", + "Effect": "Allow", + "Action": [ + "ecs:DescribeServices", + "ecs:DescribeTaskDefinition", + "ecs:DescribeTasks", + "ecs:ListClusters", + "ecs:ListServices", + "ecs:ListTasks", + "ecs:RegisterTaskDefinition", + "ecs:UpdateService" + ], + "Resource": [ + "*" + ] + } + ] +} +{% endraw %} +{% endhighlight %} + + + +## Create a CI/CD pipeline for ECS/Fargate + +Here is the complete pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - "clone" + - "build" + - "deploy" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}" + revision: "${{CF_BRANCH}}" + stage: "clone" + git: github + BuildingDockerImage: + stage: "build" + title: Building Docker Image + type: build + image_name: ${{IMAGE}} + tag: '${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile.multistage + Push: + title: "Pushing image to ECR" + stage: "deploy" + type: "push" + tag: '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + registry: "ecr" + candidate: "${{BuildingDockerImage}}" + DeployToFargate: + stage: "deploy" + image: codefreshplugins/cf-deploy-ecs + commands: + - cfecs-update ${{REGION}} ${{ECS_CLUSTER_NAME}} ${{ECS_SERVICE_NAME}} --image-name ${{IMAGE_PREFIX}}/${{IMAGE}} --image-tag '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + environment: + - AWS_ACCESS_KEY_ID=${{AWS_ACCESS_KEY_ID}} + - AWS_SECRET_ACCESS_KEY=${{AWS_SECRET_ACCESS_KEY}} + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code with a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +1. Uses a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create a Docker image +1. Uses a [push step]({{site.baseurl}}/docs/cpipelines/steps/push/) to push the docker image to ECR. The registry was previously [connected in Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) with the `ecr` identifier. +1. Runs `codefreshplugins/cf-deploy-ecs` to perform the actual deployment + + +The pipeline needs [environment variables]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) that hold all the required parameters. + +{% include image.html +lightbox="true" +file="/images/examples/amazon-ecs/ecs-variables.png" +url="/images/examples/amazon-ecs/ecs-variables.png" +alt="ECS environment variables" +caption="ECS environment variables" +max-width="80%" +%} + + + + +Note that the **`--image-name`** and **`--image-tag`** pair should comprise the **full name** of the image that was pushed to the registry (including the registry name) in order to be correctly referred by the corresponding Task Definition. + + + +## Deployment Flow + +The `codefreshplugins/cf-deploy-ecs` step performs the following: + + +1. Gets the ECS service by specified `aws-region`, `ecs-cluster`, and `service-names`. +1. Creates a new revision from the current task definition of the service. If `--image-name` and `--image-tag` are provided, it replaces the image tag. +1. Runs the `update-service` command with the new task definition revision. +1. Waits for the deployment to complete. 
+ * Deployment is successfully completed if `runningCount == desiredCount` for PRIMARY deployment - see `aws ecs describe-services` + * The `cfecs-update` command exits with a timeout error if after --timeout (default = 900s) `runningCount` does not equal `desiredCount` + * The `cfecs-update` exits with an error if --max-failed (default = 2) or more ECS tasks were stopped with error for the task definition that you are deploying. ECS continuously retries failed tasks. + +You can also find the same step in the form of a [Codefresh plugin](https://codefresh.io/steps/step/ecs-deploy). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[External Registries]({{site.baseurl}}/docs/integration/docker-registries/) + + diff --git a/_docs/example-catalog/cd-examples/deploy-to-heroku.md b/_docs/example-catalog/cd-examples/deploy-to-heroku.md new file mode 100644 index 00000000..100a56ec --- /dev/null +++ b/_docs/example-catalog/cd-examples/deploy-to-heroku.md @@ -0,0 +1,212 @@ +--- +title: "Deploy to Heroku" +description: "Deploy your application or image to Heroku" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +Heroku is a container-based cloud PaaS (Platform as a Service) software that allows you to deploy, run, and manage your applications. Built on top of AWS, it supports Ruby, Node.js, Java, Python, Clojure, Scala, Go and PHP. + +This tutorial will cover two examples, depending on your use case. If you are not using containers, your use case is covered using the Codefresh heroku-deployer plugin ([Example #1](#pipeline-example-1-deploying-source-code-to-heroku-using-the-codefresh-heroku-plugin)). If you are using containers, you can achieve deployment by using a combination of build, push, and freestyle steps ([Example #2](#pipeline-example-2-deploy-a-docker-image-to-heroku)). + +## Example Django Application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/heroku-python-django-sample-app). + +The repository contains a Django starter project with the following commands: + +- `pip install -r requirements.txt` to install dependencies. +- `python -m unittest composeexample.utils` runs unit tests. +- `python manage.py runserver 0.0.0.0:8000` to start the application locally. + +Once launched the application presents the Django starter page at localhost:8000. + +## Pipeline Example #1: Deploying Source Code to Heroku Using the Codefresh Heroku Plugin + +### Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/#create-a-codefresh-account/) +- A [free Heroku account](https://signup.heroku.com){:target="\_blank"} +- A Heroku API token (you can find this under **Account Settings** and then scrolling down, you will find the API Key) + +### Create the pipeline + +This pipeline has three stages: clone, test, and deploy. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-deployer-pipeline.png" +url="/images/examples/deployments/heroku-deployer-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. It will automatically clone the project for you. 
+ +Note that you need to change the environment variables in the deploy stage to your respective values. You can do this directly [in the YAML itself]({{site.baseurl}}/docs/how-to-guides/migrating-from-travis-ci/#environment-variables), or through the Codefresh UI. Navigate to the in-line editor, and to the right you will find a tab lebeled **Variables**. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-deployer-variables2.png" +url="/images/examples/deployments/heroku-deployer-variables2.png" +alt="Codefresh UI Pipeline Variables View" +caption="Codefresh UI Pipeline Variables View" +max-width="100%" +%} + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - clone + - test + - deploy +steps: + clone: + title: "Cloning main repository..." + stage: "clone" + type: "git-clone" + arguments: + repo: "codefresh-contrib/heroku-python-django-sample-app" + revision: "master" + git: "github" + run_unit_tests: + title: "Running unit tests..." + stage: "test" + type: "freestyle" + working_directory: "${{clone}}" + arguments: + image: "python:3.6-slim" + commands: + - "pip install -r requirements.txt --cache-dir=/codefresh/volume/pip-cache" + - "python -m unittest composeexample.utils" + deploy_to_heroku: + title: "Deploying to Heroku..." + stage: "deploy" + type: "heroku-deployer" + arguments: + APP_NAME: $APP_NAME + EMAIL: $EMAIL + API_TOKEN: $API_TOKEN +{% endraw %} +{% endhighlight %} + +The above pipeline has the following steps: + +1. A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step that clones the main repository +2. A [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that installs dependencies and runs the unit tests +3. A freestyle step that deploys the application to Heroku using the heroku-deployer plugin from the [Step Marketplace](https://codefresh.io/steps/step/heroku-deployer) + +## Pipeline Example #2: Deploy a Docker Image to Heroku + +This example differs from the plugin usage, as it deploys a built Docker image to Heroku. + +Note that you need to change the environment variables to your respective values. You can do this directly [in the YAML itself]({{site.baseurl}}/docs/how-to-guides/migrating-from-travis-ci/#environment-variables), or through the Codefresh UI. Navigate to the in-line editor, and to the right you will find a tab lebeled **Variables**. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-deployer-variables.png" +url="/images/examples/deployments/heroku-deployer-variables.png" +alt="Codefresh UI Pipeline Variables View" +caption="Codefresh UI Pipeline Variables View" +max-width="100%" +%} + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/#create-a-codefresh-account/) +- A [free Heroku account](https://signup.heroku.com){:target="\_blank"} +- An empty repository already created in Heroku using the `heroku create ` command +- A Heroku registry [connected to Codefresh]({{site.baseurl}}/docs/docker-registries/external-docker-registries/other-registries/#heroku-registries) +- A Heroku API token (you can find this under **Account Settings** and then scrolling down, you will find the API Key) + +### Create the pipeline + +This pipeline has five stages: clone, build, test, push, and release. 
+ +{% include image.html +lightbox="true" +file="/images/examples/deployments/heroku-vanilla-push-pipeline.png" +url="/images/examples/deployments/heroku-vanilla-push-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. It will automatically clone the project for you. + +`codefresh-heroku-push-image.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +version: '1.0' +stages: + - clone + - build + - test + - push + - release +steps: + clone: + title: "Cloning main repository..." + stage: "clone" + type: "git-clone" + arguments: + repo: "codefresh-contrib/heroku-python-django-sample-app" + revision: "master" + git: "github" + build: + title: "Building Docker Image..." + stage: "build" + type: "build" + working_directory: "./heroku-python-django-sample-app" + arguments: + image_name: "${{IMAGE_NAME}}" + tag: "master" + dockerfile: "Dockerfile" + run_unit_tests: + title: "Running unit tests..." + stage: "test" + type: "freestyle" + working_directory: "./heroku-python-django-sample-app" + arguments: + image: '${{build}}' + commands: + - "python -m unittest composeexample.utils" + push_image: + title: "Pushing image to Heroku..." + stage: "push" + type: "push" + arguments: + candidate: '${{build}}' + image_name: "${{IMAGE_NAME}}/web" + registry: "heroku" + release_image: + title: "Releasing image..." + stage: "release" + type: "freestyle" + arguments: + image: "nazarcodefresh/heroku-cli:alpine" + commands: + - >- + printf "machine api.heroku.com\n login $EMAIL\n password + $API_TOKEN\nmachine git.heroku.com\n login $EMAIL\n password + $API_TOKEN\n" > ~/.netrc + - "heroku container:release --app $IMAGE_NAME web" +{% endraw %} +{% endhighlight %} + +The pipeline does the following: +1. Clones the main repository through the [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds our Docker image through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Runs unit tests on our Docker image through another freestyle step. +1. Pushes to the Heroku registry through a [push step]({{site.baseurl}}/docs/pipelines/steps/push/). +1. Releases the Docker image through another freestyle step. + + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp.md b/_docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp.md new file mode 100644 index 00000000..fa66c1c2 --- /dev/null +++ b/_docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp.md @@ -0,0 +1,122 @@ +--- +title: "Deploy to a VM via SCP" +description: "Deploy your application to Tomcat using SCP" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/create-codefresh-account) +- A distribution of [Tomcat](https://tomcat.apache.org/download-90.cgi){:target="\_blank"} setup on a remote server (running with port 8080 exposed) + +## The example Java Application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/scp-war-app){:target="\_blank"}. 
+ +The example application is a simple Hello World Java application using the [Spark Java framework](http://sparkjava.com/){:target="\_blank"}: + +{% include image.html +lightbox="true" +file="/images/examples/deployments/scp-hello-world.png" +url="/images/examples/deployments/scp-hello-world.png" +alt="Hello World!" +caption="Hello World!" +max-width="100%" +%} + + +```java + @Override + public void init() { + get("/hello", (req, res) -> "Hello World"); + } +``` + +## Create the pipeline + +Our pipeline has three stages: clone, package, and transfer. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/scp-pipeline.png" +url="/images/examples/deployments/scp-pipeline.png" +alt="SCP pipeline" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. It will automatically clone the project for you. + +Note that you need to change the environment variables under the `transfer` step to your respective values. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +# More examples of Codefresh YAML can be found at +# https://codefresh.io/docs/docs/example-catalog/example + +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "package" + - "transfer" + +steps: + clone: + title: "Cloning repository..." + type: "git-clone" + stage: "clone" + arguments: + repo: "codefresh-contrib/scp-war-app" + + package: + title: "Packaging war..." + type: "freestyle" + stage: "package" + arguments: + image: "maven:3.5.2-jdk-8-alpine" + working_directory: "${{clone}}" + commands: + - "mvn -Dmaven.repo.local=/codefresh/volume/m2_repository clean package" + + transfer: + title: "Transferring war to Tomcat..." + type: "freestyle" + stage: "transfer" + arguments: + image: "ictu/sshpass:latest" + working_directory: "${{package}}/target" + environment: + - USER= + - HOST= + - PASSWORD= + - TOMCAT_DIR= + commands: + - "echo | ssh-keygen -P '' -t rsa" + - "sshpass -p $PASSWORD ssh-copy-id -i /root/.ssh/id_rsa.pub -o StrictHostKeyChecking=no $USER@$HOST" + - "scp sparkjava-hello-world-1.0.war $USER@$HOST:$TOMCAT_DIR" +{% endraw %} +{% endhighlight %} + +The above pipeline does the following: + +1. Clones the main repository through the [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Installs the dependencies via Maven and packages our `war` file through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Transfers our application via scp to a Tomcat server through another freestyle step. 
+ +Note that you will need to change the listed environment variables accordingly, either through the YAML itself, or through your pipeline settings: + +{% include image.html +lightbox="true" +file="/images/examples/deployments/scp-variables.png" +url="/images/examples/deployments/scp-variables.png" +alt="Pipeline variables" +caption="Pipeline variables" +max-width="100%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/deploy-with-kustomize.md b/_docs/example-catalog/cd-examples/deploy-with-kustomize.md new file mode 100644 index 00000000..21b3089f --- /dev/null +++ b/_docs/example-catalog/cd-examples/deploy-with-kustomize.md @@ -0,0 +1,244 @@ +--- +title: "Deploy with Kustomize" +description: "Deploy your services to Kubernetes using Kustomize" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +[Kustomize](https://kustomize.io) is a tool included with kubectl 1.14 that "lets you customize raw, template-free YAML files for multiple purposes, leaving the original YAML untouched and usable as is." + +Kustomize is more of an overlay engine, as opposed to a templating engine. You create a base configuration and overlays. Your overlays contain a *kustomization.yaml* file, and any variants/changes are applied over top of the base configuration. Kustomize does not use templates at all. + +While it is good for simple scenarios, we suggest that you use Helm for managing your Kubernetes applications. Helm is a full package manager for Kubernetes manifests that also provides templating capabilities. See [this example]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/){:target="\_blank"} for more information. + +## The example application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/kustomize-sample-app){:target="\_blank"}. + +The sample application is a simple Spring Boot web app, that displays an environment variable, `MY_MYSQL_DB` on the page: + +```java +public class HelloController { + + String my_sql_db = System.getenv("MY_MYSQL_DB"); + + @RequestMapping("/") + public String index() { + return my_sql_db; + } +``` + +The project contains a [base](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#base){:target="\_blank"} and two [overlays](https://github.com/kubernetes-sigs/kustomize/blob/master/docs/glossary.md#overlay){:target="\_blank"}, one for a staging environment and one for production. + +The base manifest holds a dummy variable for `MY_MYSQL_DB` which will be overlayed once we call the kustomize command in our pipeline. + +`base/deployment.yaml` +```yaml +... + env: + - name: MY_MYSQL_DB + valueFrom: + configMapKeyRef: + name: the-map + key: mysqlDB +``` + +We will overlay on top of the manifests a different value for `MY_MYSQL_DB` for the staging environment and production environment. 
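+
+Each overlay directory also contains its own `kustomization.yaml`, which points to the base and lists the files to merge on top of it. The following is only an illustrative sketch for the staging overlay; the exact file in the example repository may differ slightly:
+
+`overlays/staging/kustomization.yaml`
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+# Pull in the base manifests and patch them with the staging config map
+bases:
+  - ../../base
+patchesStrategicMerge:
+  - config-map.yaml
+```
+
+The config maps that the overlays merge in are shown below.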
+ +`overlays/staging/config-map.yaml` +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: the-map +data: + mysqlDB: "staging-mysql.example.com:3306" +``` + +`overlays/production/config-map.yaml` +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: the-map +data: + mysqlDB: "prod-mysql.example.com:3306" +``` + +In addition, for the production environment, the number of replicas will be overlayed to 3 instead of 1 (as [defined in the base deployment](https://github.com/codefresh-contrib/kustomize-sample-app/blob/32e683f82940de0bf2de2da40fa6b150e2b24b23/base/deployment.yaml#L8)){:target="\_blank"}. + +`overlays/production/deployment.yaml` +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: the-deployment +spec: + replicas: 3 +``` + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-user-management/create-codefresh-account) + +- A Kubernetes cluster [connected to your Codefresh account](https://codefresh.io/docs/docs/integrations/kubernetes/#connect-a-kubernetes-cluster) + +## Create the staging environment pipeline + +This pipeline will have two stages: clone and deploy. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-staging-pipeline.png" +url="/images/examples/deployments/k8s-kustomize-staging-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line pipeline editor of the Codefresh UI. However, make sure to replace cluster context for the kubectl command under the arguments section with your own that you integrated with Codefresh. It will automatically clone the project for you and deploy. + +`staging-codefresh.yml` +{% highlight yaml %} +{% raw %} +# More examples of Codefresh YAML can be found at +# https://codefresh.io/docs/docs/example-catalog/ + +version: "1.0" +# Stages can help you organize your steps in stages + +stages: + - clone + - deploy + +steps: + clone: + title: Cloning main repository... + type: git-clone + stage: clone + arguments: + repo: https://github.com/codefresh-contrib/kustomize-sample-app.git + git: github + revision: master + + deploy: + title: Deploying to Staging using Kustomize... + type: freestyle + stage: deploy + working_directory: ${{clone}} + arguments: + image: codefresh/kubectl:1.14.9 + commands: + - kubectl config use-context anna-sandbox@codefresh-support + - kubectl apply -k overlays/staging +{% endraw %} +{% endhighlight %} + +The above pipeline does the following: +1. Clones the main repository through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Connects to our Kubernetes cluster we have integrated with Codefresh using `kubectl`, and deploys the application as a staging environment with the appropriate value for `MY_MYSQL_DB` as defined in our configMap using Kustomize (the `-k` flag), through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + +>If you are using `kubectl` prior to 1.14, you can use the following command to deploy with Kustomize: + `kustomize build overlays/production | kubectl apply -f` + +## Create the production environment pipeline + +Likewise, this pipeline will have two stages: clone and deploy. 
+
+{% include image.html
+lightbox="true"
+file="/images/examples/deployments/k8s-kustomize-prod-pipeline.png"
+url="/images/examples/deployments/k8s-kustomize-prod-pipeline.png"
+alt="Codefresh UI Pipeline View"
+caption="Codefresh UI Pipeline View"
+max-width="100%"
+%}
+
+You should be able to copy and paste this YAML in the in-line editor of the Codefresh UI. Remember to again replace the cluster context in the `kubectl` command with your own. Click **Save and Run**, and the project is cloned for you automatically.
+
+`prod-codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+# More examples of Codefresh YAML can be found at
+# https://codefresh.io/docs/docs/example-catalog/
+
+version: "1.0"
+# Stages can help you organize your steps in stages
+
+stages:
+  - clone
+  - deploy
+
+steps:
+  clone:
+    title: Cloning main repository...
+    type: git-clone
+    stage: clone
+    arguments:
+      repo: https://github.com/codefresh-contrib/kustomize-sample-app.git
+      git: github
+      revision: master
+
+  deploy:
+    title: Deploying to Production using Kustomize...
+    type: freestyle
+    stage: deploy
+    working_directory: ${{clone}}
+    arguments:
+      image: codefresh/kubectl:1.14.9
+      commands:
+        - kubectl config use-context anna-sandbox@codefresh-support
+        - kubectl apply -k overlays/production
+{% endraw %}
+{% endhighlight %}
+
+The above pipeline does the following:
+
+1. Clones the main repository through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/).
+1. Connects with `kubectl` to the Kubernetes cluster we have integrated with Codefresh, and deploys the application as a production environment with the appropriate value for `MY_MYSQL_DB` as defined in our configMap using Kustomize (the `-k` flag), through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/).
+
+>Note that if you are using kubectl prior to 1.14, you can use the following command to deploy with Kustomize:
+>`kustomize build overlays/production | kubectl apply -f -`
+
+## Verification
+
+After you run these pipelines, your deployments are displayed in the [Kubernetes dashboard]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#accessing-the-kubernetes-dashboard).
+ +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-dashboard.png" +url="/images/examples/deployments/k8s-kustomize-dashboard.png" +alt="Codefresh Kubernetes Deployments" +caption="Codefresh Kubernetes Deployments" +max-width="100%" +%} + +You can test that the application deployed correctly to both environments by accessing the endpoints: + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-staging-endpoint.png" +url="/images/examples/deployments/k8s-kustomize-staging-endpoint.png" +alt="Staging endpoint" +caption="Staging endpoint" +max-width="100%" +%} + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-kustomize-prod-endpoint.png" +url="/images/examples/deployments/k8s-kustomize-prod-endpoint.png" +alt="Production endpoint" +caption="Production endpoint" +max-width="100%" +%} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Deployment options to Kubernetes]({{site.baseurl}}/docs/deployments/kubernetes/deployment-options-to-kubernetes) +[Running custom kubectl commands]({{site.baseurl}}/docs/deployments/kubernetes/custom-kubectl-commands/) +[Deploy with Helm]({{site.baseurl}}/docs/example-catalog/cd-examples/helm/) + diff --git a/_docs/example-catalog/cd-examples/docker-swarm.md b/_docs/example-catalog/cd-examples/docker-swarm.md new file mode 100644 index 00000000..ad6dfbe1 --- /dev/null +++ b/_docs/example-catalog/cd-examples/docker-swarm.md @@ -0,0 +1,221 @@ +--- +title: "Deploy to Docker SWARM" +description: "Deploy to Docker Swarm with Codefresh" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/docker-swarm/ + - /docs/deploy-to-docker-swarm/ + - /docs/deploy-your-containers/docker-swarm/ +toc: true +--- + +Codefresh can easily deploy your application to [Docker Swarm](https://docs.docker.com/engine/swarm/){:target="\_blank"} using [Codefresh pipelines]({{site.baseurl}}/docs/pipelines/pipelines/). + +You will need to provide: + +1. The `docker-stack.yml` that contains the definition of the application +1. The host where your Docker Swarm is running +1. An SSH key that Codefresh can use to access remotely the Docker Swarm host +1. The stack name that will be used once the application is deployed + +All this information will be passed to the pipeline in the form of build parameters. + + +## Example application + +For an example Docker Swarm application, see [https://github.com/codefreshdemo/example-voting-app](https://github.com/codefreshdemo/example-voting-app){:target="\_blank"} + +To launch it locally you need to download [Docker](https://www.docker.com/products/overview){:target="\_blank"}. +If you are on Mac or Windows, [Docker Compose](https://docs.docker.com/compose){:target="\_blank"} is automatically installed. +On Linux, make sure you have the latest version of [Compose](https://docs.docker.com/compose/install/){:target="\_blank"}. + + +Run in this root directory: + +{% highlight bash %} +{% raw %} + +docker-compose up + +{% endraw %} +{% endhighlight %} + +The app runs at [http://localhost:5000](http://localhost:5000), and the results are at [http://localhost:5001](http://localhost:5001). + +Alternately, if you want to run it on a Docker Swarm, first make sure you have a Swarm. 
+If you don't, run: + +{% highlight bash %} +{% raw %} + +docker swarm init + +{% endraw %} +{% endhighlight %} + +Once you have your swarm, in this directory run: + +{% highlight bash %} +{% raw %} + +docker stack deploy --compose-file docker-stack.yml vote + +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_warning}} +The swarm master must have Python installed. +{{site.data.callout.end}} + +## Deploy to Remote Swarm with Codefresh + +First you need to set up the following environment variables in your Codefresh pipeline: + +{: .table .table-bordered .table-hover} +| `RDOCKER_HOST` | remote Docker Swarm master machine, accessible over SSH (for example, ubuntu@ec2-public-ip) | +| `STACK_NAME` | is new Docker stack name (use \"vote\", for example) | +| `SSH_KEY` | private SSH key, used to access Docker Swarm master machine | +| `SPLIT_CHAR` | split character, you've used to replace `newline` in SSH key. Recommendation: use `,` (`comma` character). | + +The `SSH_KEY` variable has the contents of the [SSH key](https://www.ssh.com/ssh/public-key-authentication){:target="\_blank"} that can access the Docker Swarm host. Currently, in order to pass SSH key through Codefresh UI, you need to convert it to single line string (replacing `newline` with `comma`), like this: + +{% highlight bash %} +{% raw %} +SSH_KEY=$(cat ~/.ssh/my_ssh_key_file | tr '\n' ',') +{% endraw %} +{% endhighlight %} + +The `SPLIT_CHAR` variable should hold the replacement character that was used for the SSH key (in the example above it is the comma character) + +{% include image.html +lightbox="true" +file="/images/2f1884a-codefresh_env_vars.png" +url="/images/2f1884a-codefresh_env_vars.png" +alt="Docker Swarm build parameters" +caption="Docker Swarm build parameters" +max-width="70%" +%} + + +## Deploy to Docker Swarm with a YAML step + +Once you have defined all the variables, deploy to your cluster using the following [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + + `codefresh.yml` +{% highlight yaml %} +{% raw %} + +deploy_to_swarm: + image: codefresh/remote-docker + working_directory: ${{main_clone}} + commands: + - rdocker ${{RDOCKER_HOST}} docker stack deploy --compose-file docker-stack.yml ${{STACK_NAME}} + environment: + - SSH_KEY=${{SSH_KEY}} + when: + branch: + only: + - master + +{% endraw %} +{% endhighlight %} + +You can also pass custom credentials like this: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} + +deploy_to_swarm: + image: codefresh/remote-docker + working_directory: ${{main_clone}} + commands: + - rdocker ${{RDOCKER_HOST}} docker login ${{MY_REGISTRY}} -u ${{MY_REGISTRY_USER}} -p ${{MY_REGISTRY_PASSWORD}} \&\& docker stack deploy --compose-file docker-compose.yml --with-registry-auth ${{STACK_NAME}} + environment: + - SSH_KEY=${{SSH_KEY}} + when: + branch: + only: + - master +{% endraw %} +{% endhighlight %} + + + +## Create a CI/CD pipeine for Docker Swarm + +Here is the complete pipeline: + +{% include +image.html +lightbox="true" +file="/images/examples/docker-swarm/docker-swarm-pipeline.png" +url="/images/examples/docker-swarm/docker-swarm-pipeline.png" +alt="Docker Swarm pipeline" +caption="Docker Swarm pipeline" +max-width="100%" +%} + +And here is the pipeline definition: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + main_clone: + title: Cloning main repository... 
+ stage: prepare + type: git-clone + repo: 'codefreshdemo/example-voting-app' + revision: master + git: github-1 + MyResultDockerImage: + title: Building Result Docker Image + stage: build + type: build + image_name: resultApp + working_directory: ./result/ + tag: master + dockerfile: Dockerfile + MyVoteDockerImage: + title: Building Vote Docker Image + stage: build + type: build + image_name: voteApp + working_directory: ./vote/ + tag: master + dockerfile: Dockerfile + MyWorkerDockerImage: + title: Building Worker Docker Image + stage: build + type: build + image_name: workedApp + working_directory: ./worker/ + tag: master + dockerfile: Dockerfile + DeployToSwarmNow: + image: codefresh/remote-docker + working_directory: ${{main_clone}} + stage: deploy + commands: + - rdocker ${{RDOCKER_HOST}} docker login ${{MY_REGISTRY}} -u ${{MY_REGISTRY_USER}} -p ${{MY_REGISTRY_PASSWORD}} \&\& docker stack deploy --compose-file docker-compose.yml --with-registry-auth ${{STACK_NAME}} + environment: + - SSH_KEY=${{SSH_KEY}} +{% endraw %} +{% endhighlight %} + +The values of `MY_REGISTRY`, `MY_REGISTRY_USER` and `MY_REGISTRY_PASSWORD` depend upon the type of [your connected registry]({{site.baseurl}}/docs/integration/docker-registries/). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + diff --git a/_docs/example-catalog/cd-examples/elastic-beanstalk.md b/_docs/example-catalog/cd-examples/elastic-beanstalk.md new file mode 100644 index 00000000..cd0b6949 --- /dev/null +++ b/_docs/example-catalog/cd-examples/elastic-beanstalk.md @@ -0,0 +1,136 @@ +--- +title: "Deploy to Elastic Beanstalk" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/elastic-beanstalk/ + - /docs/deploy-your-containers/elastic-beanstalk/ +toc: true +--- + + +## Prerequisites + +- Configured Application in Elastic Beanstalk service
      + See: [http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/GettingStarted.html](http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/GettingStarted.html){:target="_blank"} + + +## Deployment with Codefresh +- Add encrypted environment variables for AWS credentials: + * `AWS_ACCESS_KEY_ID` + * `AWS_SECRET_ACCESS_KEY` + +- Provide the following environment variables: + * `AWS_REGION` + * `AWS_ENV_NAME` + * `AWS_VERSION` + * `AWS_BRANCH` + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png" +alt="codefresh_eb_env_vars.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +{% raw %} +The ``${{AWS_VERSION}}`` of application you can find in the Elastic Beanstalk service. +{% endraw %} +{{site.data.callout.end}} + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_version_label.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_version_label.png" +alt="codefresh_eb_version_label.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +{% raw %} +The ``${{AWS_ENV_NAME}}`` of application you can find in the Elastic Beanstalk service. +{% endraw %} +{{site.data.callout.end}} + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_environment.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_environment.png" +alt="codefresh_eb_environment.png" +max-width="40%" +%} + +Add the following step to codefresh.yml: + + `deploy_step` +{% highlight yaml %} +{% raw %} +deploy-elastic-beanstalk: + fail-fast: false + image: garland/aws-cli-docker:latest + commands: + - sh -c "aws configure set region '${{AWS_REGION}}' && aws elasticbeanstalk update-environment --environment-name '${{AWS_ENV_NAME}}' --version-label '${{AWS_VERSION}}' " + when: + condition: + all: + masterBranch: "'${{CF_BRANCH}}' == '${{AWS_BRANCH}}'" +{% endraw %} +{% endhighlight %} + +{:.text-secondary} +## Deployment Flow +- Go to the Elastic Beanstalk service and create an application and environment. + + +{% include +image.html +lightbox="true" +file="/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png" +url="/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png" +alt="codefresh_eb_environment.png" +max-width="40%" +%} + +- Perform the following commands from root of your project: + * eb init + * eb create {% raw %}`${{AWS_ENV_NAME}}`{% endraw %} + + + +>Note: + If you don't have awsebcli - install EB CLI [http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3-install.html](http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/eb-cli3-install.html){:target="_blank"}. 
+
+{% include
+image.html
+lightbox="true"
+file="/images/examples/elastic-beanstalk/codefresh_eb_health.png"
+url="/images/examples/elastic-beanstalk/codefresh_eb_health.png"
+alt="codefresh_eb_health.png"
+max-width="40%"
+%}
+
+- Add this repository to Codefresh, provide the necessary environment variables, and build this service
+
+{% include
+image.html
+lightbox="true"
+file="/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png"
+url="/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png"
+alt="codefresh_eb_cf_step_deploy.png"
+max-width="40%"
+%}
+
+## Example
+
+* [cf-example-deploy-elasticbeanstalk](https://github.com/codefreshdemo/cf-example-deploy-elasticbeanstalk){:target="_blank"}
+
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples)
diff --git a/_docs/example-catalog/cd-examples/helm.md b/_docs/example-catalog/cd-examples/helm.md
new file mode 100644
index 00000000..1b104663
--- /dev/null
+++ b/_docs/example-catalog/cd-examples/helm.md
@@ -0,0 +1,225 @@
+---
+title: "Deploy with Helm"
+description: "Use Helm in a Codefresh pipeline"
+group: example-catalog
+sub_group: cd-examples
+toc: true
+---
+
+[Helm](https://helm.sh/){:target="\_blank"} is the package manager for Kubernetes.
+Codefresh has comprehensive support for Helm:
+
+* Free [built-in Helm repository]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/) with each Codefresh account
+* [Helm chart dashboard]({{site.baseurl}}/docs/deployments/helm/add-helm-repository/) to track your charts
+* [Helm Release dashboard]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/) to view your deployments
+* [Environment dashboard]({{site.baseurl}}/docs/deployments/kubernetes/environment-dashboard/) to view Helm releases
+* [Helm promotion dashboard]({{site.baseurl}}/docs/deployments/helm/helm-environment-promotion/) to promote Helm releases
+* Add any external Helm repository on any other cloud provider
+
+Codefresh also provides a [pipeline step]({{site.baseurl}}/docs/new-helm/using-helm-in-codefresh-pipeline/) for deploying with Helm.
+
+For more insights on Helm charts, see also our [Helm best practices]({{site.baseurl}}/docs/new-helm/helm-best-practices/) guide.
+
+
+## The example Helm project
+
+You can see the example project at [https://github.com/codefresh-contrib/helm-sample-app](https://github.com/codefresh-contrib/helm-sample-app){:target="\_blank"}. The repository contains a simple Go application, a Dockerfile, and an example chart.
+
+
+## Prerequisites
+
+[At least one Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) in your Codefresh account.
+
+>Note that if you still use Helm 2, you should also have installed the server-side component of Helm 2 (Tiller) using `helm init`. This command is best run from the cloud console of your cluster. The respective pipelines of this guide are in the [helm-2 branch](https://github.com/codefresh-contrib/helm-sample-app/tree/helm-2){:target="\_blank"}.
+
+
+## CI/CD pipeline with Helm deployment
+
+It is possible to deploy a Helm chart directly as it exists on the filesystem. This is not the recommended way to use Helm, because you are bypassing the Helm chart repository, but it is certainly the simplest Helm pipeline possible.
+ +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-deploy-pipeline.png" +url="/images/examples/helm/helm-deploy-pipeline.png" +alt="Pipeline for Helm deployment" +caption="Pipeline for Helm deployment" +max-width="100%" +%} + +Here is the whole pipeline: + + `codefresh-do-not-store.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + clone: + title: Cloning main repository... + stage: prepare + type: git-clone + arguments: + repo: codefresh-contrib/helm-sample-app + revision: master + git: github + build: + title: Building Docker Image + stage: build + type: build + working_directory: ./helm-sample-app + arguments: + image_name: helm-sample-app-go + tag: multi-stage + dockerfile: Dockerfile + deploy: + title: Deploying Helm Chart + type: helm + stage: deploy + working_directory: ./helm-sample-app + arguments: + action: install + chart_name: charts/helm-example + release_name: my-go-chart-prod + helm_version: 3.0.2 + kube_context: my-demo-k8s-cluster + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'image_tag=multi-stage' + - 'replicaCount=3' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +1. Builds a docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) +1. Deploys the Helm chart to a cluster named `my-demo-k8s-cluster` using the Helm step [from the Step Marketplace](https://codefresh.io/steps/step/helm){:target=\_blank"}. + +In this example, `charts/helm-example` refers to the [filesystem location in the code](https://github.com/codefresh-contrib/helm-sample-app/tree/master/charts/helm-example){:target=\_blank"} that was just checked out. + +The deployment will be visible in the [Helm releases dashboard]({{site.baseurl}}/docs/new-helm/helm-releases-management/). + +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-release.png" +url="/images/examples/helm/helm-release.png" +alt="Helm release view" +caption="Helm release view" +max-width="100%" +%} + +If you want to run this example yourself, make sure to edit the chart and put your own values there for the Docker image. + +## CI/CD pipeline with Helm deployment that also stores the chart + +It is recommended to use a Helm repository to store your chart before deploying it. This way you know what is deployed in your clusters +and you can also reuse charts in other installations. + +First of all you need to import in your pipeline from the [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) the settings for the internal Helm repository (or any other external repository that you have setup in Codefresh). + This will make available the internal Helm repository to your pipeline so that it can push/pull Helm charts from it. + + {% include image.html + lightbox="true" + file="/images/examples/helm/import-helm-configuration.png" + url="/images/examples/helm/import-helm-configuration.png" + alt="Using the default Helm repository in a Pipeline" + caption="Using the default Helm repository in a Pipeline" + max-width="40%" + %} + +Once that is done you can change your pipeline to also store the chart first and *then* deploy it. 
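+
+Compared to the previous pipeline, the only real addition is a step that pushes the chart to the imported Helm repository before the deployment runs. Taken from the full pipeline shown below, it looks like this:
+
+  `store_step`
+{% highlight yaml %}
+{% raw %}
+  store:
+    title: Storing Helm Chart
+    type: helm
+    stage: store
+    working_directory: ./helm-sample-app
+    arguments:
+      # "push" uploads the chart to the Helm repository imported from the shared configuration
+      action: push
+      chart_name: charts/helm-example
+      kube_context: my-demo-k8s-cluster
+{% endraw %}
+{% endhighlight %}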
+ + +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-push-and-deploy-pipeline.png" +url="/images/examples/helm/helm-push-and-deploy-pipeline.png" +alt="Pipeline for Helm deployment that stores chart" +caption="Pipeline for Helm deployment that stores chart" +max-width="100%" +%} + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - store + - deploy +steps: + clone: + title: Cloning main repository... + stage: prepare + type: git-clone + arguments: + repo: codefresh-contrib/helm-sample-app + revision: master + git: github + build: + title: Building Docker Image + stage: build + type: build + working_directory: ./helm-sample-app + arguments: + image_name: helm-sample-app-go + tag: multi-stage + dockerfile: Dockerfile + store: + title: Storing Helm Chart + type: helm + stage: store + working_directory: ./helm-sample-app + arguments: + action: push + chart_name: charts/helm-example + kube_context: my-demo-k8s-cluster + deploy: + type: helm + stage: deploy + working_directory: ./helm-sample-app + arguments: + action: install + chart_name: charts/helm-example + release_name: my-go-chart-prod + helm_version: 3.0.2 + kube_context: my-demo-k8s-cluster + custom_values: + - 'buildID=${{CF_BUILD_ID}}' + - 'image_pullPolicy=Always' + - 'image_tag=multi-stage' + - 'replicaCount=3' + - 'image_pullSecret=codefresh-generated-r.cfcr.io-cfcr-default' +{% endraw %} +{% endhighlight %} + + +After you finish running your pipeline, not only the deployment will take place, but you will also see your chart in your [Helm Chart dashboard]({{site.baseurl}}/docs/new-helm/add-helm-repository/): + +{% include image.html +lightbox="true" +file="/images/examples/helm/helm-chart.png" +url="/images/examples/helm/helm-chart.png" +alt="Stored Helm chart" +caption="Stored Helm chart" +max-width="80%" +%} + +It is also possible to [run your own Helm commands]({{site.baseurl}}/docs/deployments/helm/using-helm-in-codefresh-pipeline/#example-custom-helm-commands) in a Codefresh pipeline. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/cd-examples/import-data-to-mongodb.md b/_docs/example-catalog/cd-examples/import-data-to-mongodb.md new file mode 100644 index 00000000..68a6c79a --- /dev/null +++ b/_docs/example-catalog/cd-examples/import-data-to-mongodb.md @@ -0,0 +1,60 @@ +--- + +title: "Import data to MongoDB" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/import-data-to-mongodb-in-composition/ + - /docs/on-demand-test-environment/example-compositions/import-data-to-mongodb/ +toc: true +--- + +If you want to import/restore or to do something else before using MongoDB in your application, you can look at the following example. + +You just need to create Dockerfile for mongo seed service and provide the command to prepare MongoDB. 
In this case it's command `mongoimport` + + `Dockerfile mongo_seed` +{% highlight docker %} +FROM mongo +COPY init.json /init.json +CMD mongoimport --host mongodb --db exampleDb --collection contacts --type json --file /init.json --jsonArray +{% endhighlight %} + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. +Let's quickly review the contents of this file: + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + mongodb: + image: mongo + command: mongod --smallfiles + ports: + - 27017 + + mongo_seed: + image: ${{mongo_seed}} + links: + - mongodb + + client: + image: ${{build_prj}} + links: + - mongodb + ports: + - 9000 + environment: + - MONGO_URI=mongodb:27017/exampleDb +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +You can add the following example to your GitHub or Bitbucket account, and build the [example](https://github.com/codefreshdemo/cf-example-manage-mongodb){:target="_blank"}. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/nodejs-angular2-mongodb.md b/_docs/example-catalog/cd-examples/nodejs-angular2-mongodb.md new file mode 100644 index 00000000..f4e69839 --- /dev/null +++ b/_docs/example-catalog/cd-examples/nodejs-angular2-mongodb.md @@ -0,0 +1,52 @@ +--- +title: "NodeJS + Angular2 + MongoDB" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/nodejs-angular2-mongodb/ + - /docs/on-demand-test-environment/example-compositions/nodejs-angular2-mongodb/ +toc: true +--- +This tutorial will walk you through the process of adding the following: + +- Build client +- Build server +- Launch composition + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. +Let's quickly review the contents of this file: + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + mongodb: + image: mongo + ports: + - 28017 + server: + image: ${{build_server}} + environment: + - MONGO_URI=mongodb://mongodb/exampleDb + links: + - mongodb + ports: + - 9000 + client: + image: ${{build_client}} + ports: + - 3000 +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/nodejs-angular2-mongo){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) diff --git a/_docs/example-catalog/cd-examples/nomad.md b/_docs/example-catalog/cd-examples/nomad.md new file mode 100644 index 00000000..a7e78d79 --- /dev/null +++ b/_docs/example-catalog/cd-examples/nomad.md @@ -0,0 +1,225 @@ +--- +title: "Deploy to Nomad" +description: "Deploy Docker images to a Nomad cluster with Codefresh" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +Even though Codefresh has great support for Kubernetes and Helm deployments, there is no lock-in on using just Kubernetes. Codefresh can deploy on any infrastructure. + + +[Nomad](https://www.nomadproject.io/){:target=\_blank"} is an alternative scheduling platform from Hashicorp. It supports docker containers (like Kubernetes), but you can also use Nomad to schedule VMs, Java apps, Go apps or any other standalone executable. 
+ +There are several public Docker Images with Nomad, so it is very easy to use Codefresh pipelines to deploy to a Nomad cluster. + + +{% include image.html +lightbox="true" +file="/images/examples/nomad/nomad-ci-pipeline.png" +url="/images/examples/nomad/nomad-ci-pipeline.png" +alt="Deploying to Nomad with Codefresh" +caption="Deploying to Nomad with Codefresh" +max-width="80%" +%} + +In this example, we will use the image at [https://hub.docker.com/r/djenriquez/nomad](https://hub.docker.com/r/djenriquez/nomad){:target=\_blank"}. + +## The example Nomad project + +You can see the example project at [https://github.com/codefresh-contrib/nomad-sample-app](https://github.com/codefresh-contrib/nomad-sample-app){:target=\_blank"}. The repository contains a simple job specification that deploys a docker container on nomad cluster. + + +Here is the whole job file: + + `docker-job.hcl` +{% highlight hcl %} +{% raw %} +job "example-job" { + # Specify this job should run in the region named "us". Regions + # are defined by the Nomad servers' configuration. + #region = "us" + + # Spread the tasks in this job between us-west-1 and us-east-1. + datacenters = ["dc1"] + + # Run this job as a "service" type. Each job type has different + # properties. See the documentation below for more examples. + type = "service" + + # Specify this job to have rolling updates, two-at-a-time, with + # 30 second intervals. + update { + stagger = "30s" + max_parallel = 1 + } + + # A group defines a series of tasks that should be co-located + # on the same client (host). All tasks within a group will be + # placed on the same host. + group "example-group" { + # Specify the number of these tasks we want. + count = 3 + + # Create an individual task (unit of work). This particular + # task utilizes a Docker container to front a web application. + task "example-task" { + # Specify the driver to be "docker". Nomad supports + # multiple drivers. + driver = "docker" + + # Configuration is specific to each driver. + config { + image = "r.cfcr.io/$CF_ACCOUNT/$CF_REPO_NAME:$CF_BRANCH_TAG_NORMALIZED" + + auth { + username = "$CF_ACCOUNT" + password = "$CFCR_LOGIN_TOKEN" + server_address = "r.cfcr.io" + } + + port_map { + http = 8080 + } + } + + # The service block tells Nomad how to register this service + # with Consul for service discovery and monitoring. + service { + # This tells Consul to monitor the service on the port + # labelled "http". Since Nomad allocates high dynamic port + # numbers, we use labels to refer to them. + port = "http" + + check { + type = "http" + path = "/" + interval = "10s" + timeout = "2s" + } + } + + # Specify the maximum resources required to run the task, + # include CPU, memory, and bandwidth. + resources { + cpu = 500 # MHz + memory = 128 # MB + + network { + mbits = 100 + + + port "http" {} + + + + } + } + } + } +} + +{% endraw %} +{% endhighlight %} + +Notice that the job specification has several [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/) embedded. We will use [envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html){:target=\_blank"} in our pipeline to replace +them with the correct values. + +## Prerequisites + +You need to create a Codefresh account and have a Nomad cluster running. You need to decide on how Codefresh will communicate +with the nomad cluster. In this simple example we just use the `NOMAD_ADDR` variable to point the nomad client to our cluster. 
In a production environment you should use proper [ACL](https://www.nomadproject.io/guides/security/acl.html){:target=\_blank"} and [certificate](https://www.nomadproject.io/guides/security/securing-nomad.html){:target=\_blank"} variables as well. + + +In this example the Nomad cluster is already setup on a VM at Google cloud. + +You also need to create a [token for the Docker registry]({{site.baseurl}}/docs/integrations/docker-registries/) so that Nomad can pull your private images on the cluster. + +## Create a CI/CD pipeline for Nomad deployments + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "deploy" +steps: + main_clone: + type: "git-clone" + title: "Clone main repository..." + repo: "codefresh-contrib/nomad-sample-app" + revision: "${{CF_BRANCH}}" + stage: "clone" + build: + title: "Building Docker Image" + type: "build" + image_name: "nomad-sample-app" + tag: "${{CF_BRANCH_TAG_NORMALIZED}}" + dockerfile: "Dockerfile" + stage: "build" + prepareJob: + title: "Preparing Nomad job" + image: bhgedigital/envsubst + stage: deploy + commands: + - envsubst < docker-job.hcl > docker-job-export.hcl + - cat docker-job-export.hcl + runJob: + title: "Deploying Nomad job" + image: djenriquez/nomad + stage: deploy + commands: + - nomad run docker-job-export.hcl +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Creates a Docker image for a simple Go application through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). The image is automatically pushed to the default Docker registry. +1. Replaces all variables in the job spec by running `envsubst`. These include: + * The Registry token so that Nomad can access the default Docker registry + * The docker image name and tag to be deployed +1. Runs the job to deploy the image to Nomad through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + + +Run the pipeline and see your deployment succeed. + +Here are the environment variables defined for this pipeline. + +{% include image.html +lightbox="true" +file="/images/examples/nomad/nomad-variables.png" +url="/images/examples/nomad/nomad-variables.png" +alt="Pipeline variables for Nomad deployments" +caption="Pipeline variables for Nomad deployments" +max-width="50%" +%} + + +The `NOMAD_ADDR` variable is holding the URL of the cluster. The `CFCR_LOGIN_TOKEN` variable holds authentication for the Codefresh Docker registry. + +## Verify the deployment + +Nomad also comes with its own UI that can show you the result of a deployment. + +{% include image.html +lightbox="true" +file="/images/examples/nomad/nomad-ui-deployment.png" +url="/images/examples/nomad/nomad-ui-deployment.png" +alt="Nomad UI deployment" +caption="Nomad UI deployment" +max-width="80%" +%} + +You can also use [Terraform]({{site.baseurl}}/docs/example-catalog/cd-examples/terraform/) in Codefresh pipelines. 
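+
+Finally, if you want the pipeline itself to confirm the result instead of relying only on the Nomad UI, you can append one more freestyle step that queries the job with the same Nomad CLI image. This is a minimal sketch (the step name is illustrative, and it assumes the same `NOMAD_ADDR` variable is available to the step):
+
+  `verify_job`
+{% highlight yaml %}
+{% raw %}
+  verifyJob:
+    title: "Checking Nomad job status"
+    image: djenriquez/nomad
+    stage: deploy
+    commands:
+      # Prints allocation and deployment information for the job submitted above
+      - nomad job status example-job
+{% endraw %}
+{% endhighlight %}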
+ +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/packer-gcloud.md b/_docs/example-catalog/cd-examples/packer-gcloud.md new file mode 100644 index 00000000..58d12ade --- /dev/null +++ b/_docs/example-catalog/cd-examples/packer-gcloud.md @@ -0,0 +1,132 @@ +--- +title: "Deploy to a Virtual Machine" +description: "Deploy to Google Cloud in a Codefresh pipeline with Packer" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +Even though Codefresh is Kubernetes-native and designed for containers, it can still deploy traditional applications in the form of Virtual Machines to any Cloud provider. + +In this example, we will use [Packer](http://www.packer.io/){:target="\_blank"} to package an application into a VM disk image that will then be launched in Google Cloud. +Because Packer itself is already offered [in a Docker container](https://hub.docker.com/r/hashicorp/packer/){:target="\_blank"}, it is very easy to run Packer in a Codefresh pipeline. + +Google also offers a [Docker image for GCloud](https://hub.docker.com/r/google/cloud-sdk/){:target="\_blank"} making the launching of the VM straightforward in a Codefresh pipeline. + + +{% include image.html +lightbox="true" +file="/images/examples/packer-gcloud/packer-codefresh-pipeline.png" +url="/images/examples/packer-gcloud/packer-codefresh-pipeline.png" +alt="Running Packer inside Codefresh" +caption="Running Packer inside Codefresh" +max-width="80%" +%} + +This Codefresh pipeline creates a VM image and then uses it to launch a Google Compute instance. + + +## The example Packer/Gcloud project + +You can see the example project at [https://github.com/codefresh-contrib/vm-packer-sample-app](https://github.com/codefresh-contrib/vm-packer-sample-app){:target="\_blank"}. The repository contains a simple Go application as well as a packer template. + +You can play with it locally after installing the `packer` and `gcloud` executables. + +## Prerequisites + +You need to create a Codefresh account and a Google account first. Then you need to create a [Service account Key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys){:target="\_blank"} which will allow `packer` and `gcloud` to communicate with Google cloud. + + +Add your service account json as a pipeline variable called `SERVICE_ACCOUNT`. The content of this variable will be used +in order to authenticate to Google cloud. + +{% include image.html +lightbox="true" +file="/images/examples/packer-gcloud/service-account-variable.png" +url="/images/examples/packer-gcloud/service-account-variable.png" +alt="Using a Service Account JSON in Codefresh" +caption="Using a Service Account JSON in Codefresh" +max-width="50%" +%} + +## Create a CI/CD pipeline for Packer/GCloud + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - deploy +steps: + main_clone: + title: 'Cloning main repository...' 
+ type: git-clone + repo: 'codefresh-contrib/vm-packer-sample-app' + git: github + revision: 'master' + stage: prepare + SetupAuth: + title: 'Setup GCloud Auth' + image: 'alpine' + stage: prepare + commands: + - echo $SERVICE_ACCOUNT > account.json + BuildMyApp: + title: Compiling App code + stage: build + image: 'golang:1.12' + commands: + - go build -o sample src/sample/trivial-web-server.go + CreatePackerImage: + title: Baking VM image + stage: build + image: 'hashicorp/packer' + commands: + - packer validate my-google-cloud-example.json + - packer build -force my-google-cloud-example.json + DeployToVM: + title: Deploying to VM + stage: deploy + image: 'google/cloud-sdk' + commands: + - gcloud auth activate-service-account --key-file=account.json + - gcloud config set project firstkubernetes-176201 + - gcloud compute instances create packer-demo-codefresh --image codefresh-simple-ubuntu-vm --zone europe-west1-b --metadata-from-file startup-script=startup.sh --tags http-server --preemptible --quiet + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Saves the content of the variable that holds the Google account as a file called `account.json`. +1. Compiles the Go application through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Runs `packer` to create a VM image based on Ubuntu that also contains the simple Go application. +1. Runs `gcloud` to launch a VM with the image that was just created. + + +Run the pipeline and see your deployment succeed. You can customize the image by editing the [Packer template](https://github.com/codefresh-contrib/vm-packer-sample-app/blob/master/my-google-cloud-example.json){:target="\_blank"}. + +Once the VM has finished launching you can access it with your web browser. + +{% include image.html +lightbox="true" +file="/images/examples/packer-gcloud/web-app-url.png" +url="/images/examples/packer-gcloud/web-app-url.png" +alt="Accessing the VM application" +caption="Accessing the VM application" +max-width="70%" +%} + + +You can follow the same procedure for any other cloud that has an API/CLI (such as AWS, Azure, Digital Ocean etc). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/pulumi.md b/_docs/example-catalog/cd-examples/pulumi.md new file mode 100644 index 00000000..74c1f3f7 --- /dev/null +++ b/_docs/example-catalog/cd-examples/pulumi.md @@ -0,0 +1,116 @@ +--- +title: "Deploy with Pulumi" +description: "Use Pulumi in a Codefresh pipeline with Docker" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +[Pulumi](https://pulumi.io/){:target="\_blank"} is a platform for *Infrastructure as Code*. It works like Terraform but allows you to use a proper programming language (TypeScript, Python, Go) to describe your infrastructure (instead of a configuration language). + +You can use Pulumi to deploy to Kubernetes or any other supported cloud platform. Because Pulumi itself is already offered [in a Docker container](https://hub.docker.com/r/pulumi/pulumi), it is very easy to run Pulumi in a Codefresh pipeline. 
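+
+As a quick illustration of how little setup is needed, a freestyle step only has to reference the public image to get a working Pulumi CLI. This is just a sketch to show the mechanics and is not part of the example project:
+
+  `check_pulumi`
+{% highlight yaml %}
+{% raw %}
+  CheckPulumiCli:
+    title: Checking Pulumi CLI
+    image: pulumi/pulumi
+    commands:
+      # The CLI ships with the image, so no installation step is required
+      - pulumi version
+      # Verifies the PULUMI_ACCESS_TOKEN variable described in the prerequisites below
+      - pulumi whoami
+{% endraw %}
+{% endhighlight %}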
+
+
+{% include image.html
+lightbox="true"
+file="/images/examples/pulumi/pulumi-pipeline.png"
+url="/images/examples/pulumi/pulumi-pipeline.png"
+alt="Running Pulumi inside Codefresh"
+caption="Running Pulumi inside Codefresh"
+max-width="80%"
+%}
+
+## The example Pulumi project
+
+You can see the example project at [https://github.com/codefresh-contrib/pulumi-sample-app](https://github.com/codefresh-contrib/pulumi-sample-app){:target="\_blank"}. The repository contains a simple Pulumi stack based on Kubernetes and TypeScript.
+
+You can play with it locally after installing the `pulumi` executable.
+
+## Prerequisites
+
+You need to create a Codefresh account and a Pulumi account first. Then you need to create a [Pulumi token](https://app.pulumi.com/account/tokens){:target="\_blank"}, which will allow Codefresh to communicate with Pulumi.
+
+[Add a Kubernetes cluster]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster/) in your Codefresh account from any cloud provider.
+
+Codefresh automatically creates a kubeconfig in any [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) with all your clusters. This is the same way that Pulumi communicates with Kubernetes, so the integration between Codefresh and Pulumi is ready out of the box.
+
+Create a [stack](https://pulumi.io/reference/stack.html){:target="\_blank"} in Pulumi or use the one provided in the example.
+
+Finally, add your Pulumi token as a pipeline variable called `PULUMI_ACCESS_TOKEN`. All freestyle steps have automatic access to all pipeline variables, and Pulumi will search for a token by default with this name when logging in.
+
+
+## Create a CI/CD pipeline for Pulumi
+
+Here is the whole pipeline:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+stages:
+  - prepare
+  - build
+  - deploy
+steps:
+  main_clone:
+    title: Cloning main repository...
+    type: git-clone
+    repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
+    revision: '${{CF_REVISION}}'
+    stage: prepare
+    git: github-1
+  BuildProject:
+    title: Build project
+    stage: build
+    image: pulumi/pulumi
+    commands:
+      - yarn install
+  SelectMyCluster:
+    title: Select K8s cluster
+    stage: deploy
+    image: codefresh/kubectl:1.13.3
+    commands:
+      - kubectl config get-contexts
+      - kubectl config use-context "kostis-demo@FirstKubernetes"
+  RunPulumi:
+    title: Deploying
+    stage: deploy
+    image: pulumi/pulumi
+    commands:
+      - pulumi stack select dev --non-interactive
+      - pulumi stack --non-interactive
+      - pulumi up --non-interactive
+{% endraw %}
+{% endhighlight %}
+
+This pipeline does the following:
+
+1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/).
+1. Runs `yarn install` to download dependencies. In this example we use TypeScript, but Go and Python would work as well (or any other language supported by Pulumi).
+1. Chooses the cluster that will be used for deployments, if you have more than one. Use your own cluster name as seen in the [Kubernetes dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/) of Codefresh.
+1. Runs `pulumi up` with the same target cluster.
+
+The pipeline needs a [single environment variable]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) that holds the content of your Pulumi Token.
+ + +{% include image.html +lightbox="true" +file="/images/examples/pulumi/pulumi-access-token.png" +url="/images/examples/pulumi/pulumi-access-token.png" +alt="Passing the Pulumi Token in the pipeline parameters" +caption="Passing the Pulumi Token in the pipeline parameters" +max-width="60%" +%} + +Run the pipeline and see your deployment succeed. + +## Handling Pull requests + +You can easily use the same pipeline or a different one for pull requests. In this case replace the `pulumi up` command with `pulumi preview`. Even better you can add an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to allows humans to inspect the pipeline first. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth.md b/_docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth.md new file mode 100644 index 00000000..b7e2884c --- /dev/null +++ b/_docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth.md @@ -0,0 +1,92 @@ +--- +title: "Secure a Docker Container using HTTP Basic Auth" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/securing-docker-container-with-http-basic-auth/ + - /docs/on-demand-test-environment/examples-compositions/securing-docker-container-with-http-basic-auth/ + - /docs/on-demand-test-environment/example-compositions/secure-a-docker-container-using-http-basic-auth/ +toc: true +--- +Before making a product publicly available, you might want to restrict access to certain users. These are some options to accomplish this goal: + + - Implement custom authentication within the system + - Configure the server to act as a proxy between the user and the application + - Limit access to specific IP addresses + +This article explains how to secure a container by exposing public ports, using an extra NGINX container to act as a proxy. + +## Expose Web App Public Port + + `webapp` +{% highlight yaml %} +{% raw %} +version: '3' +services: + web: + image: codefreshio/webapp + ports: + - "3000" +{% endraw %} +{% endhighlight %} + +The architecture for this step is displayed in the diagram below. In this step example, Docker is forwarding an internal 3000 port to the host 80 port. + +{% include +image.html +lightbox="true" +file="/images/examples/docker-https/codefresh_webapp_container.png" +url="/images/examples/docker-https/codefresh_webapp_container.png" +alt="codefresh_webapp_container.png" +max-width="40%" +%} + +## Add NGINX Proxy +To secure the web-app we are going to specify these commands in the ```docker-compose.yml``` file. + +1. Remove the port that maps from the web-app (it won't be directly accessible) +2. Add an extra NGINX container with custom configuration (proxy all traffic) +3. Configure NGINX to communicate with the web-app + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + web: + image: ${{build-prj}} + auth: + image: ${{build-nginx}} + ports: + - 80 + links: + - web + environment: + USER: ${{USERNAME}} + PASS: ${{PASSWORD}} +{% endraw %} +{% endhighlight %} + +The architecture for the ```docker-compose.yml``` file is displayed in the diagram below. 
+ +{% include +image.html +lightbox="true" +file="/images/examples/docker-https/codefresh_nginx_container.png" +url="/images/examples/docker-https/codefresh_nginx_container.png" +alt="codefresh_nginx_container.png" +max-width="40%" +%} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/cf-example-basic-auth-container){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper.md b/_docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper.md new file mode 100644 index 00000000..2134ff17 --- /dev/null +++ b/_docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper.md @@ -0,0 +1,203 @@ +--- +title: "Spring Boot + Kafka + Zookeeper" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/spring-boot-kafka-zookeeper/ + - /docs/on-demand-test-environment/example-compositions/spring-boot-kafka-zookeeper/ +toc: true +--- +This project uses `Java, Spring Boot, Kafka, Zookeeper` to show you how to integrate these services in the composition. + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/example-springboot-kafka){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Zookeeper Docker image + +Kafka uses ZooKeeper so you need to first start a ZooKeeper server if you don't already have one + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} + zookeeper: + image: wurstmeister/zookeeper + ports: + - "2181:2181" +{% endraw %} +{% endhighlight %} + +## Kafka Docker image +Now start the Kafka server. In the `docker-compose.yml` it can be something like this + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} + kafka: + build: + context: kafka + dockerfile: Dockerfile + links: + - zookeeper:zk + ports: + - "9092:9092" + environment: + KAFKA_ADVERTISED_HOST_NAME: $CF_HOST_IP + KAFKA_ZOOKEEPER_CONNECT: zk:2181 + KAFKA_MESSAGE_MAX_BYTES: 2000000 + KAFKA_CREATE_TOPICS: "Topic1:1:1" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + depends_on: + - zookeeper +{% endraw %} +{% endhighlight %} + +To start the Kafka server with the certain per-configuration, you need to use Environment variables. Below, you can see which Environment variables are available for this service. + +__Broker IDs__ + +You can configure the broker id in different ways: + +1. Explicitly, using ```KAFKA_BROKER_ID``` +2. Via a command, using ```BROKER_ID_COMMAND```, e.g. ```BROKER_ID_COMMAND: "hostname | awk -F'-' '{print $2}'"``` + +If you don't specify a broker id in your docker-compose file, it will automatically be generated (see [https://issues.apache.org/jira/browse/KAFKA-1070](https://issues.apache.org/jira/browse/KAFKA-1070){:target="_blank"}. This allows scaling up and down. In this case it is recommended to use the ```--no-recreate``` option of docker-compose to ensure that containers are not re-created and thus keep their names and ids. 
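+
+For completeness, the explicit form of the first option is just another entry in the `environment` block of the Kafka service. A minimal sketch (the value `1` below is only an example):
+
+  `docker-compose.yml`
+{% highlight yaml %}
+{% raw %}
+  kafka:
+    environment:
+      # Fixed broker id instead of relying on automatic generation
+      KAFKA_BROKER_ID: 1
+      KAFKA_ZOOKEEPER_CONNECT: zk:2181
+{% endraw %}
+{% endhighlight %}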
+
+
+__Automatically create topics__
+
+If you want to have kafka-docker automatically create topics in Kafka during
+creation, a ```KAFKA_CREATE_TOPICS``` environment variable can be
+added in ```docker-compose.yml```.
+
+Here is an example snippet from ```docker-compose.yml```:
+
+        environment:
+            KAFKA_CREATE_TOPICS: "Topic1:1:3,Topic2:1:1:compact"
+
+```Topic 1``` will have 1 partition and 3 replicas, ```Topic 2``` will have 1 partition, 1 replica and a `cleanup.policy` set to `compact`.
+
+__Advertised hostname__
+
+You can configure the advertised hostname in different ways:
+
+1. Explicitly, using ```KAFKA_ADVERTISED_HOST_NAME```
+2. Via a command, using ```HOSTNAME_COMMAND```, e.g. ```HOSTNAME_COMMAND: "route -n | awk '/UG[ \t]/{print $$2}'"```
+
+When using commands, make sure you review the "Variable Substitution" section in [https://docs.docker.com/compose/compose-file/](https://docs.docker.com/compose/compose-file/){:target="_blank"}.
+
+If ```KAFKA_ADVERTISED_HOST_NAME``` is specified, it takes precedence over ```HOSTNAME_COMMAND```.
+
+For AWS deployment, you can use the Metadata service to get the container host's IP:
+```
+HOSTNAME_COMMAND=wget -t3 -T2 -qO- http://169.254.169.254/latest/meta-data/local-ipv4
+```
+Reference: [http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html){:target="_blank"}
+
+__JMX__
+
+For monitoring purposes, you may wish to configure JMX. In addition to the standard JMX parameters, problems could arise from the underlying RMI protocol used to connect:
+
+* java.rmi.server.hostname - interface to bind the listening port.
+* com.sun.management.jmxremote.rmi.port - the port used to service RMI requests.
+
+For example, to connect to a Kafka broker running locally (assuming port 1099 is exposed):
+
+      KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099"
+      JMX_PORT: 1099
+
+## Spring Boot + Kafka
+Then grab the spring-kafka JAR and all of its dependencies. The easiest way to do that is to declare a dependency in your build tool, e.g. for Maven:
+
+  `pom.xml`
+{% highlight xml %}
+{% raw %}
+<dependency>
+    <groupId>org.springframework.kafka</groupId>
+    <artifactId>spring-kafka</artifactId>
+    <version>${spring-kafka.version}</version>
+</dependency>
+<dependency>
+    <groupId>org.springframework.kafka</groupId>
+    <artifactId>spring-kafka-test</artifactId>
+    <version>${spring-kafka.version}</version>
+    <scope>test</scope>
+</dependency>
+{% endraw %}
+{% endhighlight %}
+
+Using plain Java to send and receive a message:
+
+  `Java`
+{% highlight java %}
+{% raw %}
+private static String BOOT_TOPIC = "boot.t";
+
+@Autowired
+private Sender sender;
+
+@Autowired
+private Receiver receiver;
+
+@ClassRule
+public static KafkaEmbedded embeddedKafka = new KafkaEmbedded(1, true, BOOT_TOPIC);
+
+@BeforeClass
+public static void setUpBeforeClass() throws Exception {
+  System.setProperty("spring.kafka.bootstrap-servers", embeddedKafka.getBrokersAsString());
+}
+
+@Test
+public void testReceive() throws Exception {
+  sender.send(BOOT_TOPIC, "Hello Boot!");
+
+  receiver.getLatch().await(10000, TimeUnit.MILLISECONDS);
+  assertThat(receiver.getLatch().getCount()).isEqualTo(0);
+}
+{% endraw %}
+{% endhighlight %}
+
+Maven will download the needed dependencies, compile the code and run the unit test case. The result should be a successful build during which the following logs are generated:
+
+  `Java`
+{% highlight java %}
+{% raw %}
+.
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v1.5.2.RELEASE) + +08:36:56.175 [main] INFO c.c.kafka.SpringKafkaApplicationTest - Starting SpringKafkaApplicationTest on cnf-pc with PID 700 (started by CodeNotFound in c:\code\st\spring-kafka\spring-kafka-avro) +08:36:56.175 [main] INFO c.c.kafka.SpringKafkaApplicationTest - No active profile set, falling back to default profiles: default +08:36:56.889 [main] INFO c.c.kafka.SpringKafkaApplicationTest - Started SpringKafkaApplicationTest in 1.068 seconds (JVM running for 5.293) +08:36:58.223 [main] INFO c.codenotfound.kafka.producer.Sender - sending user='{"name": "John Doe", "favorite_number": null, "favorite_color": "green"}' +08:36:58.271 [org.springframework.kafka.KafkaListenerEndpointContainer#0-0-L-1] INFO c.c.kafka.consumer.Receiver - received user='{"name": "John Doe", "favorite_number": null, "favorite_color": "green"}' +08:37:00.240 [main] ERROR o.a.zookeeper.server.ZooKeeperServer - ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes +Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.871 sec - in com.codenotfound.kafka.SpringKafkaApplicationTest + +Results: + +Tests run: 3, Failures: 0, Errors: 0, Skipped: 0 + +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 41.632 s +[INFO] Finished at: 2017-04-17T08:37:31+02:00 +[INFO] Final Memory: 18M/212M +[INFO] ------------------------------------------------------------------------ +{% endraw %} +{% endhighlight %} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/terraform.md b/_docs/example-catalog/cd-examples/terraform.md new file mode 100644 index 00000000..0dd05f46 --- /dev/null +++ b/_docs/example-catalog/cd-examples/terraform.md @@ -0,0 +1,113 @@ +--- +title: "Deploy with Terraform" +description: "Use Terraform in a Codefresh pipeline with Docker" +group: example-catalog +sub_group: cd-examples +toc: true +--- + +[Terraform](https://www.terraform.io/){:target="\_blank"} is a platform for *Infrastructure as Code*. It allows you to describe your cloud infrastructure in a declarative manner. + +You can use Terraform to deploy to Kubernetes or any other supported cloud platform. Because Terraform itself is already offered [in a Docker container](https://hub.docker.com/r/hashicorp/terraform/){:target="\_blank"}, it is very easy to run Terraform in a Codefresh pipeline. 
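+
+As a minimal illustration, a validation step needs nothing more than the public image. This sketch is not part of the example repository and assumes it runs after the Git clone step of the pipeline shown later, so that the Terraform files are already in the working directory:
+
+  `validate_terraform`
+{% highlight yaml %}
+{% raw %}
+  ValidateTerraform:
+    title: Validating Terraform files
+    image: hashicorp/terraform:0.12.0
+    commands:
+      # Check that the files are formatted canonically, without changing them
+      - terraform fmt -check
+      # Initialize without configuring a backend, then check the configuration syntax
+      - terraform init -backend=false
+      - terraform validate
+{% endraw %}
+{% endhighlight %}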
+ + +{% include image.html +lightbox="true" +file="/images/examples/terraform/terraform-pipeline.png" +url="/images/examples/terraform/terraform-pipeline.png" +alt="Running Terraform inside Codefresh" +caption="Running Terraform inside Codefresh" +max-width="80%" +%} + +## The example Terraform project + +You can see the example project at [https://github.com/codefresh-contrib/terraform-sample-app](https://github.com/codefresh-contrib/terraform-sample-app){:target="\_blank"}. The repository contains a simple Terraform definition that creates a VM on Google cloud. + +You can play with it locally after installing the `terraform` executable. + +## Prerequisites + +You need to [create a Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) and a Google account first. Then you need to create a [Service account Key](https://cloud.google.com/iam/docs/creating-managing-service-account-keys){:target="\_blank"} which will allow terraform to communicate with Google cloud. + + +Add your service account json as a pipeline variable called `ACCOUNT_JSON_CONTENT`. The content of this variable will be used +in order to authenticate to Google cloud. + +## Create a CI/CD pipeline for Terraform + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - prepare + - deploy +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'codefresh-contrib/terraform-sample-app' + revision: master + git: github + SetupAuth: + image: alpine:3.9 + title: Setting up Google cloud auth + stage: prepare + commands: + - echo $ACCOUNT_JSON_CONTENT > /codefresh/volume/account.json + - cf_export GOOGLE_CLOUD_KEYFILE_JSON=/codefresh/volume/account.json + DeployWithTerraform: + image: hashicorp/terraform:0.12.0 + title: Deploying Terraform plan + stage: deploy + commands: + - terraform init + - terraform apply -auto-approve + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Creates a pipeline variable with the path of the Google service account by running [cf_export]({{site.baseurl}}/docs/pipelines/variables/#exporting-environment-variables-from-a-freestyle-step). +1. Creates the VM on Google cloud by running `terraform init/apply`. + +>For simplicity, we auto-approve the Terraform plan in the example pipeline. In a production pipeline, you would instead use an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to inspect the plan before actually applying it. + +The pipeline needs a [single environment variable]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) that holds the content of the service account. + + +{% include image.html +lightbox="true" +file="/images/examples/terraform/google_cloud_json.png" +url="/images/examples/terraform/google_cloud_json.png" +alt="Passing the Google account in the pipeline parameters" +caption="Passing the Google account in the pipeline parameters" +max-width="60%" +%} + + +Run the pipeline and see your deployment succeed. + + +Note that in a production pipeline you should also handle the [Terraform state](https://www.terraform.io/docs/state/){:target="\_blank"} in a proper manner. The example provided is using a file for [state storage](https://www.terraform.io/docs/backends/index.html){:target="\_blank"} which is not appropriate when using Terraform in a team environment. 
Instead you should use one of the [storage backends](https://www.terraform.io/docs/backends/types/index.html){:target="\_blank"} that support High Availability and Locking. + + + + +## Handling Pull requests + +You can easily use the same pipeline or a different one for pull requests. In this case replace the `terraform apply` command with `terraform plan`. Even better, you can add an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/) to allow humans to inspect the pipeline first. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/cd-examples/transferring-php-ftp.md b/_docs/example-catalog/cd-examples/transferring-php-ftp.md new file mode 100644 index 00000000..56aa1d27 --- /dev/null +++ b/_docs/example-catalog/cd-examples/transferring-php-ftp.md @@ -0,0 +1,118 @@ +--- +title: "Deploy to VM via FTP" +description: "Deploying a PHP application to a VM using FTP" +group: example-catalog +sub_group: cd-examples +toc: true +redirect_from: + - /docs//learn-by-example/java/spring-mvc-jdbc-template/ +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/account-management/create-a-codefresh-account/){:target="\_blank"} +- A remote machine with an FTP server and SSH setup (ensure that your FTP directory, I.e., `/srv/ftp/pub` has the proper write permissions for the FTP user). + +>Note that as you may already know, FTP is extremely insecure as it relies on plain-text passwords and usernames, making data very vulnerable to sniffing. A more secure solution would be to use SFTP or SCP. + +## Example PHP project + +The example project can be found on [GitHub](https://github.com/codefresh-contrib/ftp-php-app){:target="\_blank"}. The application is a simple PHP application that displays an example timer. + +{% include image.html +lightbox="true" +file="/images/examples/php-file-transfer/test-environment.png" +url="/images/examples/php-file-transfer/test-environment.png" +alt="Example PHP Application" +caption="Example PHP Application" +max-width="90%" +%} + +## Create the pipeline + +Our pipeline includes four stages: + +- A stage for cloning +- A stage for packaging +- A stage for transferring files + +{% include image.html +lightbox="true" +file="/images/examples/php-file-transfer/pipeline.png" +url="/images/examples/php-file-transfer/pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +Here is the entire pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +# More examples of Codefresh YAML can be found at +# https://codefresh.io/docs/docs/example-catalog/ + +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "install" + - "transfer" +steps: + clone: + title: "Cloning main repository..." + type: "git-clone" + arguments: + repo: "codefresh-contrib/ftp-php-app" + git: "github" + stage: "clone" + install_dependencies: + title: "Collecting Php dependencies..." 
+    type: "freestyle"
+    working_directory: "./ftp-php-app"
+    arguments:
+      image: "composer:1.9.3"
+      commands:
+        - "composer install --ignore-platform-reqs --no-interaction --no-plugins --no-scripts --prefer-dist"
+    stage: "install"
+  ftp_transfer:
+    title: "Transferring application to VM via ftp..."
+    type: "freestyle"
+    working_directory: "./ftp-php-app"
+    arguments:
+      image: "dockito/lftp-client:latest"
+      environment:
+        - USER=
+        - PASSWORD=
+        - HOST=
+        - PUB_FTP_DIR=
+      commands:
+        - lftp -e "set ftp:use-site-utime2 false; mirror -x ^\.git/$ -X flat-logo.png -p -R ftp-php-ap $PUB_FTP_DIR/ftp-php-app; exit" -u $USER,$PASSWORD $HOST
+    stage: "transfer"
+{% endraw %}
+{% endhighlight %}
+
+This pipeline does the following:
+
+1. Clones the main repository through a [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/).
+2. Installs the necessary PHP dependencies for our application through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/).
+3. Transfers our application via FTP through another freestyle step. Note that you will need to change the environment variables to your respective values, either in the YAML itself (above), or through the pipeline settings:
+
+{% include image.html
+lightbox="true"
+file="/images/examples/php-file-transfer/variables.png"
+url="/images/examples/php-file-transfer/variables.png"
+alt="Codefresh Environment Variables"
+caption="Codefresh Environment Variables"
+max-width="90%"
+%}
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples)
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/)
+
+
diff --git a/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md b/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md
new file mode 100644
index 00000000..c15084dd
--- /dev/null
+++ b/_docs/example-catalog/cd-examples/trigger-a-k8s-deployment-from-docker-registry.md
@@ -0,0 +1,135 @@
+---
+title: "Trigger a Kubernetes Deployment from a Docker Hub Push Event"
+description: "Learn how to trigger a Kubernetes deployment when an image is updated"
+group: example-catalog
+sub_group: cd-examples
+toc: true
+---
+
+In this example, we will cover how to trigger a Kubernetes deployment from a Docker Hub push event using a Docker Hub [registry trigger]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/#create-a-new-dockerhub-trigger).
+
+Our example has two pipelines: one for packaging code (CI), and the second for deploying code (CD).
+
+## Prerequisites
+
+- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/)
+- A DockerHub registry [connected to your Codefresh account]({{site.baseurl}}/docs/integrations/docker-registries/#docker-hub)
+- A Kubernetes cluster [connected to your Codefresh account]({{site.baseurl}}/docs/integrations/kubernetes/#connect-a-kubernetes-cluster)
+- A service for your application [deployed to your cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/#viewing-your-kubernetes-services)
+
+## Example Project
+
+You can see the example project on [GitHub](https://github.com/codefresh-contrib/registry-trigger-sample-app/tree/master){:target="\_blank"}. The repository contains a simple Hello World NodeJS app as well as two pipelines.
+ +## Create the CI Pipeline + +As mentioned before, our first pipeline will handle the CI process. +The pipeline has three stages: + +- A stage for cloning +- A stage for building the image +- A stage for pushing the image to DockerHub + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-deployment-ci-pipeline.png" +url="/images/examples/deployments/k8s-deployment-ci-pipeline.png" +alt="Codefresh UI CI Pipeline View" +caption="Codefresh UI CI Pipeline View" +max-width="90%" +%} + + `codefresh-CI-pipeline.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' + +stages: +- checkout +- build +- push + +steps: + clone: + title: Cloning main repository... + type: git-clone + stage: checkout + arguments: + repo: 'codefresh-contrib/registry-trigger-sample-app' + revision: 'master' + git: github + build_my_app: + title: Building image... + type: build + stage: build + arguments: + image_name: registry-trigger-sample-app + working_directory: ${{clone}} + tag: 'master' + dockerfile: Dockerfile + push_to_my_registry: + stage: 'push' + type: push + title: Pushing to Dockerhub... + arguments: + candidate: ${{build_my_app}} + tag: 'latest' + registry: dockerhub + image_name: annabaker/registry-trigger-sample-app +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Builds a docker image tagged with the Application version through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +3. Pushes the Docker image through a [push step](https://codefresh.io/docs/docs/pipelines/steps/push/) to the Docker Hub registry you have integrated with Codefresh. + +## Create the CD Pipeline + +This pipeline contains one stage/step, for deploying. + +{% include image.html +lightbox="true" +file="/images/examples/deployments/k8s-deployment-CD-pipeline.png" +url="/images/examples/deployments/k8s-deployment-CD-pipeline.png" +alt="Codefresh UI CD Pipeline View" +caption="Codefresh UI CD Pipeline View" +max-width="90%" +%} + +Note that for the trigger mechanism to take place, you will need to [add a Docker Hub registry trigger]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/#create-a-new-dockerhub-trigger) to the pipeline. + + `codefresh-CD-pipeline.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - "deploy" + +steps: + deploy_to_k8s: + title: Running Deploy Script... + type: deploy + kind: kubernetes + arguments: + cluster: anna-demo@FirstKubernetes + namespace: default + service: registry-trigger-sample-app + candidate: + image: annabaker/registry-trigger-sample-app:latest + registry: 'dockerhub' +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Deploys the image to Kubernetes through a [deploy step]({{site.baseurl}}/docs/pipelines/steps/deploy/). The deploy step uses a [Registry trigger]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/#create-a-new-dockerhub-trigger) to kick off the pipeline when the updated image is pushed to the registry. 
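+
+After the deploy step runs, you can optionally verify the rollout from a pipeline step as well, using the same `codefresh/kubectl` image shown in other examples. This is only a sketch: it assumes the Deployment behind the `registry-trigger-sample-app` service has the same name, and it reuses the cluster context from this example.
+
+  `verify_rollout`
+{% highlight yaml %}
+{% raw %}
+  verify_rollout:
+    title: Verifying rollout...
+    stage: "deploy"
+    image: codefresh/kubectl
+    commands:
+      # Select the same cluster context used by the deploy step
+      - kubectl config use-context "anna-demo@FirstKubernetes"
+      # Wait until the new pods are rolled out successfully
+      # (assumes the Deployment is named like the service)
+      - kubectl rollout status deployment/registry-trigger-sample-app -n default
+{% endraw %}
+{% endhighlight %}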
+ +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/) diff --git a/_docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step.md b/_docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step.md new file mode 100644 index 00000000..b228a895 --- /dev/null +++ b/_docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step.md @@ -0,0 +1,42 @@ +--- +title: "Use kubectl as part of freestyle step" +description: "How to run manually kubectl in a Codefresh pipeline" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/use-kubectl-as-part-of-freestyle-step/ +toc: true +--- + + +Running Kubernetes commands in Codefresh as part of the workflow is very easy. + + +Codefresh is adding all your clusters into the workflow ready to be used as part of your CI/CD pipeline. +The context remains the same as it appears in the [Codefresh Kubernetes dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/). + +>If your cluster name includes spaces then make sure that you use quotes in the `kubectl` command. + +* Use image: `codefresh/kubectl` +* Add your commands: + * `kubectl config get-contexts`. Will print the cluster that we added to the workflow + * `kubectl config use-context "my-cluster-name"`. The name is the same as in `Account settings` → `Integrations` → `Kubernetes` + * `kubectl get po -owide` + * `kubectl get nodes` + + +## Follow the example + +* Add this [Git repo](https://github.com/Codefresh-Examples/kubectl-in-freestyle-step){:target="_blank"} to your account +* Change the pipeline configuration to use `codefresh.yml`. +* Build. + +## Running parallel steps with kubectl + +More complex examples can be found in the [custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/) documentation page. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/cd-examples/web-terminal.md b/_docs/example-catalog/cd-examples/web-terminal.md new file mode 100644 index 00000000..37151528 --- /dev/null +++ b/_docs/example-catalog/cd-examples/web-terminal.md @@ -0,0 +1,48 @@ +--- +title: "Web terminal" +description: "" +group: example-catalog +sub_group: cd-examples +redirect_from: + - /docs/web-terminal/ + - /docs/on-demand-test-environment/example-compositions/web-terminal/ +toc: true +--- +This example shows you how to access containers running in a Codefresh standup environment. + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. 
+Here are the contents of this file: + + `Composition.yml` +{% highlight yaml %} +version: '3' +services: + my-service: + image: 'containers101/whomi:master' + volumes: + - my-service:/app + ports: + - '1337' + terminal: + image: 'containers101/cfterminal:master' + ports: + - '8000' + volumes_from: + - my-service +volumes: + my-service: + driver: local +{% endhighlight %} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/codefreshdemo/cf-example-web-termial){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#cd-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/android.md b/_docs/example-catalog/ci-examples/android.md new file mode 100644 index 00000000..a02d66b1 --- /dev/null +++ b/_docs/example-catalog/ci-examples/android.md @@ -0,0 +1,80 @@ +--- +title: "Compile and package an Android application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Android applications use Java/Gradle for their build system. Because Codefresh already supports [Gradle]({{site.baseurl}}/docs/example-catalog/ci-examples/gradle/), it is also very easy to build Android projects. + +Any Gradle command can run inside a Docker image that contains the Android SDK. As an example, we will use a [Nextcloud](https://hub.docker.com/r/nextcloudci/android){:target="\_blank"} image from Dockerhub. + + +## The example project + +You can see the example project at [https://github.com/codefresh-contrib/android-sample-app](https://github.com/codefresh-contrib/android-sample-app){:target="\_blank"}. The repository contains a Hello World Android project with the following tasks: + +* `./gradlew test` runs unit tests +* `./gradlew build` builds the application + + +## Create a CI pipeline that compiles/releases Android + +In most cases you would create a similar pipeline to a Gradle project. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/mobile/android-ci-pipeline.png" +url="/images/learn-by-example/mobile/android-ci-pipeline.png" +alt="Building and Testing an Android app" +caption="Building and Testing an Android app" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/android-sample-app/blob/master/codefresh.yml){:target="\_blank"} that uses a Docker image with the Android SDK in order to run Gradle. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... 
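+    # git-clone step: checks out codefresh-contrib/android-sample-app so the Gradle steps below can run against the sources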
+ stage: prepare + type: git-clone + repo: 'codefresh-contrib/android-sample-app' + revision: master + git: github + TestIt: + title: Running Tests + stage: test + image: nextcloudci/android:android-48 + commands: + - chmod +x ./gradlew + - ./gradlew test --no-daemon --gradle-user-home=/codefresh/volume/.gradle + BuildIt: + title: Packaging Android App + stage: build + image: nextcloudci/android:android-48 + commands: + - ./gradlew build --no-daemon --gradle-user-home=/codefresh/volume/.gradle +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, runs unit tests and finally builds the Android application. + +Codefresh is smart enough that [caches automatically]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#how-caching-works-in-codefresh) for us the workspace of a build (`/codefresh/volume`). This works great for build tools that keep their cache in the project folder, but not for Maven/Gradle which keep their cache externally. By changing the location of the Gradle cache we make sure that Codefresh will cache automatically the Gradle libraries resulting in much faster builds. + + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + diff --git a/_docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository.md b/_docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository.md new file mode 100644 index 00000000..d81e5363 --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository.md @@ -0,0 +1,94 @@ +--- +title: "Build an Image from a different Git repository" +description: "Build microservices from other repositories" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/build-an-image-from-a-different-git-repository/ +toc: true +--- + +In most cases, your Codefresh pipeline checks out a single Git repository. Codefresh has great support also for [monorepos]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#using-the-modified-files-field-to-constrain-triggers-to-specific-folderfiles) if you have placed all your applications in a single repository. + +A Codefresh pipeline is not really tied to a specific Git repository, which means that by [checking out multiple Git repositories]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/#cloning-multiple-repositories) you can build Docker images from other unrelated repositories in a single pipeline if you wish to do so. + +## Building Docker images from other Git repositories + + +Here is a Codefresh pipeline that checks out two microservices from two different Git repositories. + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/build-from-other-git-repo.png" +url="/images/examples/docker-build/build-from-other-git-repo.png" +alt="Checkout and build docker images" +caption="Checkout and build docker images" +max-width="100%" +%} + +And here is the [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/). + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - 'clone phase' + - 'build phase' +steps: + checkoutApp1: + title: 'Cloning first repository...' 
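+    # each git-clone step checks out its repository into its own subfolder of the shared volume,
+    # which the build steps below reference through working_directory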
+ type: git-clone + repo: kostis-codefresh/example_nodejs_postgres + revision: experiment1 + git: github + stage: 'clone phase' + checkoutApp2: + title: 'Cloning second repository...' + type: git-clone + repo: kostis-codefresh/trivial-go-web + revision: master + git: github + stage: 'clone phase' + myFirstDockerImage: + title: 'Building Microservice A' + type: build + dockerfile: Dockerfile + image_name: my-nodejs-image + tag: from-develop-branch + working_directory: './example_nodejs_postgres' + stage: 'build phase' + mySecondDockerImage: + title: 'Building Microservice B' + type: build + dockerfile: Dockerfile + working_directory: './trivial-go-web' + image_name: my-app-image + tag: from-master-branch + stage: 'build phase' +{% endraw %} +{% endhighlight %} + +The pipeline first checks out two different Git repositories, which themselves contain Dockerfiles. Then it creates a Docker image for each one using the respective Dockerfile. + +You can see both images in the [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/#viewing-docker-images) . + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/two-docker-images.png" +url="/images/examples/docker-build/two-docker-images.png" +alt="Docker images from other Git repos" +caption="Docker images from other Git repos" +max-width="100%" +%} + + +Notice that there are no explicit push steps in the pipeline, as all successful Codefresh pipelines automatically push to the private Docker registry. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +[Build step in pipelines in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build and Push an image]({{site.baseurl}}/docs/pipelines/examples/build-and-push-an-image/) +[Parallel pipelines]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location.md b/_docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location.md new file mode 100644 index 00000000..75d5b67f --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location.md @@ -0,0 +1,74 @@ +--- +title: "Build an Image by specifying a Dockerfile location" +description: "How to choose a Dockerfile to build with Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/build-an-image-specify-dockerfile-location/ +toc: true +--- + +You may have a project where the Dockerfile is **not** in the root folder of the project. Maybe the repository has multiple projects inside, each with its own Dockerfile, or you simply want to use a different folder for the Docker context. + +>The source code of the repository is at [https://github.com/codefreshdemo/cf-example-dockerfile-other-location](https://github.com/codefreshdemo/cf-example-dockerfile-other-location){:target="\_blank"}. Feel free to fork it if you want to follow along. + +If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/). + + +## Building a Dockerfile from a different folder + +By default, if you run a single command like the one below, Docker uses the Dockerfile of the current folder: + +``` +docker build . -t my-web-app +``` + +If your Dockerfile is in a different folder, specify it explicitly with: + +``` +docker build . 
-t my-web-app -f subfolder/Dockerfile
+```
+
+Codefresh supports a similar syntax as well. The `dockerfile` property of the [build step]({{site.baseurl}}/docs/pipelines/steps/build/) can accept a full path.
+
+Here is the full pipeline:
+
+ `codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+steps:
+  main_clone:
+    title: Cloning main repository...
+    type: git-clone
+    repo: 'codefreshdemo/cf-example-dockerfile-other-location'
+    revision: 'master'
+    git: github
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    image_name: my-app
+    working_directory: '.'
+    tag: 'master'
+    dockerfile: docker/Dockerfile
+{% endhighlight %}
+
+This pipeline checks out the source code of the repository and then builds the Dockerfile found in the `docker` subfolder, while still keeping the root directory as the Docker context.
+
+{% include image.html
+lightbox="true"
+file="/images/examples/docker-build/build-spefify-dockerfile.png"
+url="/images/examples/docker-build/build-spefify-dockerfile.png"
+alt="Building a Docker image with specific Dockerfile"
+caption="Building a Docker image with specific Dockerfile"
+max-width="100%"
+%}
+
+You could also change the Docker build context by editing the `working_directory` property. By default, it looks at the root folder of the project, but any subfolder path is also valid.
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples)
+[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/)
+[Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/)
+[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository)
+[Build and push an Image]({{site.baseurl}}/docs/yaml-examples/example-catalog/ci-examples/build-and-push-an-image)
+[Build an Image With build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments)
\ No newline at end of file
diff --git a/_docs/example-catalog/ci-examples/build-an-image-with-build-arguments.md b/_docs/example-catalog/ci-examples/build-an-image-with-build-arguments.md
new file mode 100644
index 00000000..a7a62356
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/build-an-image-with-build-arguments.md
@@ -0,0 +1,133 @@
+---
+title: "Build an Image with build arguments"
+description: "Use Docker arguments in Codefresh pipelines"
+group: example-catalog
+sub_group: ci-examples
+redirect_from:
+  - /docs/build-an-image-with-build-arguments/
+toc: true
+---
+
+Building a Docker image that requires build arguments is very easy with Codefresh pipelines.
+
+The source code of the repository is at [https://github.com/codefreshdemo/cf-example-build-arguments](https://github.com/codefreshdemo/cf-example-build-arguments){:target="\_blank"}. Feel free to fork it if you want to follow along.
+
+If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/).
+
+## Using Docker build arguments
+
+The example application is a very simple NodeJS application with the following Dockerfile:
+
+`Dockerfile`
+{% highlight docker %}
+{% raw %}
+ARG NODE_VERSION
+FROM node:$NODE_VERSION
+
+ARG APP_DIR
+
+RUN mkdir -p $APP_DIR
+
+WORKDIR $APP_DIR
+
+COPY package.json .
+RUN npm install --silent
+COPY . .
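+# NODE_VERSION and APP_DIR above are supplied at build time through the pipeline's build_arguments (see below)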
+EXPOSE 3000 + +ENV PORT 3000 + +CMD [ "npm", "start" ] +{% endraw %} +{% endhighlight %} + +This Dockerfile expects two [build arguments](https://docs.docker.com/engine/reference/builder/#/arg){:target="\_blank"}: + +* `NODE_VERSION` is the version of Node image to use as base +* `APP_DIR` is the source directory to be used inside the container + +## Building a Dockerfile passing values for build arguments + +When you build an image locally on your workstation, you can define build arguments with the `--build-arg` syntax: + +``` +docker build . -t my-node-app --build-arg NODE_VERSION=8 --build-arg APP_DIR=/usr/src/app +``` + +You can get the same result within a Codefresh pipeline: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-example-build-arguments' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + image_name: my-app + working_directory: '.' + tag: 'master' + dockerfile: Dockerfile + build_arguments: + - NODE_VERSION=8 + - APP_DIR=/usr/src/app +{% endraw %} +{% endhighlight %} + +This pipeline checks out the source code of the repository and then builds the Dockerfile by passing the values `8` and `/usr/src/app` to the two arguments. + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/docker-build-arguments.png" +url="/images/examples/docker-build/docker-build-arguments.png" +alt="Using Docker build arguments in a pipeline" +caption="Using Docker build arguments in a pipeline" +max-width="100%" +%} + +## Using Codefresh variables as build arguments + +In the previous pipeline, the Docker build arguments are defined in the pipeline itself, but you can also use [pipeline variables]({{site.baseurl}}/docs/pipelines/pipelines/#creating-new-pipelines), [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/), or any other standard mechanism you already have in place. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-example-build-arguments' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + image_name: my-app + working_directory: '.' + tag: 'master' + dockerfile: Dockerfile + build_arguments: + - NODE_VERSION=${{NODE_VERSION_FROM_SHARED_CONFIG}} + - APP_DIR=${{APP_DIR_PIPELINE_VARIABLE}} +{% endraw %} +{% endhighlight %} + +In this case, you can also use any of the built-in [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/). 
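+
+As a concrete sketch, assuming your Dockerfile also declares matching `ARG` instructions (the `GIT_BRANCH` and `GIT_SHA` arguments below are illustrative and are not part of the example Dockerfile above), you could forward built-in variables such as `CF_BRANCH` and `CF_SHORT_REVISION` directly:
+
+{% highlight yaml %}
+{% raw %}
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    image_name: my-app
+    working_directory: '.'
+    tag: 'master'
+    dockerfile: Dockerfile
+    build_arguments:
+      # GIT_BRANCH and GIT_SHA must exist as ARG entries in your own Dockerfile
+      - GIT_BRANCH=${{CF_BRANCH}}
+      - GIT_SHA=${{CF_SHORT_REVISION}}
+{% endraw %}
+{% endhighlight %}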
+ + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/) +[Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +[Build and push an Image]({{site.baseurl}}/docs/yaml-examples/example-catalog/ci-examples/build-and-push-an-image) diff --git a/_docs/example-catalog/ci-examples/build-an-image-with-the-dockerfile-in-root-directory.md b/_docs/example-catalog/ci-examples/build-an-image-with-the-dockerfile-in-root-directory.md new file mode 100644 index 00000000..a9c5cb2e --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-an-image-with-the-dockerfile-in-root-directory.md @@ -0,0 +1,67 @@ +--- +title: "Build an Image with the Dockerfile in root directory" +description: "Get started quickly with building Docker images" +group: example-catalog +sub_group: ci-examples +toc: true +--- +Building a Docker image is one of the basic operations in Codefresh pipelines. + +>The source code of the repository is at [https://github.com/codefreshdemo/cf-yml-example-build-dockerfile-inroot](https://github.com/codefreshdemo/cf-yml-example-build-dockerfile-inroot){:target="\_blank"}. Feel free to fork it if you want to follow along. + +If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/). + + +## Building a Dockerfile from the root folder + +By default, if you run a single command like the one below, Docker uses the Dockerfile of the current folder: + +``` +docker build . -t my-web-app +``` + +You can get the same result within a Codefresh pipeline: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-yml-example-build-dockerfile-inroot' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + image_name: my-app + working_directory: '${{main_clone}}' + tag: 'master' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This pipeline checks out the source code of the repository and then builds a dockerfile found at the root folder of the project. + +{% include image.html +lightbox="true" +file="/images/examples/docker-build/build-dockerfile-root.png" +url="/images/examples/docker-build/build-dockerfile-root.png" +alt="Building a Docker image with a default Dockerfile" +caption="Building a Docker image with a default Dockerfile" +max-width="100%" +%} + +You can also change the Docker build context by editing the `working_directory` property. By default, it looks at the root folder of the project, but any subfolder path is also valid. 
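+
+For example, here is a sketch of the same build step with the context moved to a subfolder. The `service-a` folder name is hypothetical; use a folder that actually exists in your repository:
+
+{% highlight yaml %}
+{% raw %}
+  build_my_app:
+    title: Building Docker Image from a subfolder
+    type: build
+    image_name: my-app
+    # the Docker build context is now the service-a subfolder of the cloned repository
+    working_directory: '${{main_clone}}/service-a'
+    tag: 'master'
+    dockerfile: Dockerfile
+{% endraw %}
+{% endhighlight %}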
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +[Build and push an Image]({{site.baseurl}}/docs/yaml-examples/example-catalog/ci-examples/build-and-push-an-image) +[Build an Image With build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments) diff --git a/_docs/example-catalog/ci-examples/build-and-push-an-image.md b/_docs/example-catalog/ci-examples/build-and-push-an-image.md new file mode 100644 index 00000000..33ebac63 --- /dev/null +++ b/_docs/example-catalog/ci-examples/build-and-push-an-image.md @@ -0,0 +1,137 @@ +--- +title: "Build and push an Image" +description: "Build Docker images and push them to registries with Codefresh" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/build-and-push-an-image/ + - /docs/docker-registries/push-image-to-a-docker-registry/ +toc: true +--- + +Building a Docker image and then pushing it to a registry is one of the most basic scenarios for creating a pipeline. +In this example we will use a demo Node.js application that will be packaged in a Docker image. + +The source code of the repository is at [https://github.com/codefreshdemo/cf-example-build-and-push](https://github.com/codefreshdemo/cf-example-build-and-push){:target="\_blank"}. Feel free to fork it if you want to follow along. + +If you don't have a Codefresh account already, you can easily create a free one from the [sign-up page]({{site.baseurl}}/docs/administration/create-a-codefresh-account/). + + +## Building and push Docker image to default registry + +Building a Docker image with Codefresh is easy, and only requires a simple step. In addition, all successful pipelines in Codefresh automatically push to [your default Docker registry]({{site.baseurl}}/docs/docker-registries/#the-default-registry), without additional configuration, if you have one. + +Here is the most basic pipeline that clones a repo and builds an image: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- checkout +- build +steps: + clone: + title: Cloning main repository... + type: git-clone + stage: checkout + repo: 'codefreshdemo/cf-example-build-and-push' + revision: 'master' + git: github + build_my_app: + title: Building Node.Js Docker Image + type: build + stage: build + image_name: my-node-js-app + working_directory: {{clone}} + tag: 'master' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +## Building and pushing Docker image to _any registry_. + +You can push your image to any [registry]({{site.baseurl}}/docs/docker-registries/). + +* First you need to connect your external registry in the integrations page. 
Here are the instructions for:
+
+  * [Docker Hub]({{site.baseurl}}/docs/integrations/docker-registries/docker-hub/)
+  * [Google Container Registry]({{site.baseurl}}/docs/integrations/docker-registries/google-container-registry/)
+  * [Amazon EC2 Container Registry]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/)
+  * [Bintray.io]({{site.baseurl}}/docs/integrations/docker-registries/bintray-io/)
+  * [Quay.io]({{site.baseurl}}/docs/integrations/docker-registries/quay-io/)
+  * [Other Registries]({{site.baseurl}}/docs/integrations/docker-registries/other-registries/)
+
+* Then add a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) in your pipeline and use the registry name of your integration.
+
+Here is the full example:
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+stages:
+- checkout
+- build
+- push
+steps:
+  clone:
+    title: Cloning main repository...
+    type: git-clone
+    stage: checkout
+    repo: 'codefreshdemo/cf-example-build-and-push'
+    revision: 'master'
+    git: github
+  build_my_app:
+    title: Building Node.Js Docker Image
+    type: build
+    stage: build
+    image_name: my-node-js-app
+    working_directory: ${{clone}}
+    tag: 'master'
+    dockerfile: Dockerfile
+  push_to_my_registry:
+    stage: 'push'
+    type: push
+    title: Pushing to a registry
+    candidate: ${{build_my_app}}
+    tag: 'v1.0.0'
+    registry: dockerhub
+    image_name: kkapelon/my-node-js-app
+{% endraw %}
+{% endhighlight %}
+
+Here we use a specific tag, `v1.0.0`, but Codefresh has several variables that you can use to tag images. Common examples are `CF_BRANCH_TAG_NORMALIZED`, `CF_SHORT_REVISION` or `CF_BUILD_ID`. Read more on [variables]({{site.baseurl}}/docs/pipelines/variables/).
+
+{% include image.html
+ lightbox="true"
+ file="/images/examples/docker-build/build-and-push-pipeline.png"
+ url="/images/examples/docker-build/build-and-push-pipeline.png"
+ alt="Pushing image to external registry"
+ caption="Pushing image to external registry"
+ max-width="100%"
+ %}
+
+
+If you run the pipeline, the Docker image is pushed *both* to the private Docker registry (by the build step) *and* to the external Docker registry (by the push step).
+
+
+## More options for pushing images
+
+Codefresh has several options when it comes to pushing images:
+
+* You can specify multiple tags to be pushed
+* You can use ECR registries directly
+* You can embed credentials in the push steps
+
+Read more in [push steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/push/).
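+
+For instance, here is a sketch of a push step that pushes the same candidate image with several tags. The tag values below are illustrative; consult the push step documentation for the exact options available to your account:
+
+{% highlight yaml %}
+{% raw %}
+  push_multiple_tags:
+    stage: 'push'
+    type: push
+    title: Pushing with multiple tags
+    candidate: ${{build_my_app}}
+    # every entry below becomes a separate tag of the same image in the registry
+    tags:
+      - latest
+      - ${{CF_SHORT_REVISION}}
+      - ${{CF_BRANCH_TAG_NORMALIZED}}
+    registry: dockerhub
+    image_name: kkapelon/my-node-js-app
+{% endraw %}
+{% endhighlight %}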
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/) +[Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +[Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +[Build an Image With Build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments) diff --git a/_docs/example-catalog/ci-examples/c-make.md b/_docs/example-catalog/ci-examples/c-make.md new file mode 100644 index 00000000..06b95d76 --- /dev/null +++ b/_docs/example-catalog/ci-examples/c-make.md @@ -0,0 +1,74 @@ +--- +title: "Compile and test a C application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any C/C++ application very easily as both `gcc` and `g++` are already offered in Dockerhub. There is also another example available with [C++ and cmake]({{site.baseurl}}/docs/example-catalog/ci-examples/cpp-cmake). + +## The example C project + +You can see the example project at [https://github.com/codefresh-contrib/c-sample-app](https://github.com/codefresh-contrib/c-sample-app){:target="\_blank"}. The repository contains a C starter project with a `Makefile` and several targets: + +* `make` compiles the code. +* `make test` runs unit tests +* `make clean` removes artifacts and binaries. + +There are also extra targets for `tags` and `etags`. + +## Create a CI pipeline for C applications + +Creating a CI/CD pipeline for C is very easy, because Codefresh can run any [gcc image](https://hub.docker.com/_/gcc/){:target="\_blank"} that you wish. Gcc docker images already contain the `make` utility. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/cc/c-make-pipeline.png" +url="/images/learn-by-example/cc/c-make-pipeline.png" +alt="Compiling a C application in a pipeline" +caption="Compiling a C application in a pipeline" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/c-sample-app/blob/master/codefresh.yml){:target="\_blank"} that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - build +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'codefresh-contrib/c-sample-app' + revision: master + git: github + compile_my_sources: + title: Compile + stage: build + image: gcc + commands: + - make + run_my_tests: + title: Test + stage: build + image: gcc + commands: + - make test +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, compiles the code and runs unit tests. In all cases we use the public Docker image of Gcc that also contains `make`. 
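+
+If you need a specific compiler version, you can also pin the image tag instead of using the floating `gcc` image. A minimal sketch (the `gcc:12` tag below is just one example of a published tag on Dockerhub):
+
+{% highlight yaml %}
+{% raw %}
+  compile_my_sources:
+    title: Compile
+    stage: build
+    # pin a specific GCC release instead of the floating "gcc" tag
+    image: gcc:12
+    commands:
+      - make
+{% endraw %}
+{% endhighlight %}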
+ + +## Related articles +[C++ example]({{site.baseurl}}/docs/example-catalog/ci-examples/cpp-cmake/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/call-child-pipelines.md b/_docs/example-catalog/ci-examples/call-child-pipelines.md new file mode 100644 index 00000000..fd83b5b7 --- /dev/null +++ b/_docs/example-catalog/ci-examples/call-child-pipelines.md @@ -0,0 +1,108 @@ +--- +title: "Call a CD pipeline from a CI pipeline" +description: "How to call child pipelines from a parent pipeline" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +In Codefresh you can easily create nested pipelines by calling other pipelines from within an existing pipeline. The [codefresh-run plugin](https://codefresh.io/steps/step/codefresh-run){:target="\_blank"} allows you to launch another pipeline, and optionally wait for its completion. + +{% include image.html +lightbox="true" +file="/images/examples/nested-pipelines/call-other-pipeline.png" +url="/images/examples/nested-pipelines/call-other-pipeline.png" +alt="Parent and child pipelines" +caption="Parent and child pipelines" +max-width="80%" +%} + +A very common pattern in Codefresh is to have a parent pipeline responsible for Continuous Integration (packaging code), that calls a child pipeline for Continuous Delivery (taking care of deployment). + +## Example project + +You can see the example project at [https://github.com/codefresh-contrib/call-child-pipeline-sample-app](https://github.com/codefresh-contrib/call-child-pipeline-sample-app){:target="\_blank"}. The repository contains a NodeJs app as well as three - one parent and two child pipelines. + +## Create a pipeline that calls other pipelines + +Here is the definition of the parent pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - package + - deploy +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: github + stage: prepare + read_my_app_version: + title: Reading Application version + stage: prepare + image: node:latest + commands: + - export PACKAGE_VERSION=$(node -p "require('./package.json').version") + - cf_export PACKAGE_VERSION + build_my_docker_image: + title: 'Building My Docker Image' + stage: package + type: build + dockerfile: Dockerfile + image_name: my-app-image + tag: ${{PACKAGE_VERSION}} + call_qa_pipeline: + title: Deploy to QA + stage: deploy + type: codefresh-run + arguments: + PIPELINE_ID: child-pipelines/qa-pipeline + VARIABLE: + - CF_BRANCH=${{CF_BRANCH}} + - CF_REVISION=${{CF_REVISION}} + - APP_VERSION=${{PACKAGE_VERSION}} + when: + branch: + only: + - develop + call_prod_pipeline: + title: Deploy to Prod + stage: deploy + type: codefresh-run + arguments: + PIPELINE_ID: child-pipelines/prod-pipeline + VARIABLE: + - CF_BRANCH=${{CF_BRANCH}} + - CF_REVISION=${{CF_REVISION}} + - APP_VERSION=${{PACKAGE_VERSION}} + when: + branch: + only: + - /^release.*/i + + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. 
Creates a variable that contains the Application version as specified in `package.json` through [cf_export]({{site.baseurl}}/docs/pipelines/variables/#exporting-environment-variables-from-a-freestyle-step). +1. Builds a docker image tagged with the Application version through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Optionally runs the downstream QA pipeline if the branch is named `develop`. It also passes several environment variables to the child pipeline (including the Application version). +1. Optionally runs the downstream Prod pipeline if the branch name starts with `release`. It also passes several environment variables to the child pipeline (including the Application version). + +The last two steps use [conditions]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) to decide if they will run or not. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Pipeline plugins](https://codefresh.io/steps/){:target="\_blank"} \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/cc.md b/_docs/example-catalog/ci-examples/cc.md new file mode 100644 index 00000000..c23c08fc --- /dev/null +++ b/_docs/example-catalog/ci-examples/cc.md @@ -0,0 +1,10 @@ +--- +title: "C/C++" +description: "How to build C/C++ applications with Codefresh CI/CD pipelines" +group: example-catalog +toc: true +--- +This section contains Codefresh examples based on C and C++. + +- [C Example with make]({{site.baseurl}}/docs/learn-by-example/cc/c-make) +- [C++ Example with cmake]({{site.baseurl}}/docs/learn-by-example/cc/cpp-cmake) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/codacy-testing.md b/_docs/example-catalog/ci-examples/codacy-testing.md new file mode 100644 index 00000000..bd2e437b --- /dev/null +++ b/_docs/example-catalog/ci-examples/codacy-testing.md @@ -0,0 +1,174 @@ +--- +title: "Codacy coverage reports" +description: "How to forward coverage reports to Codacy" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Codacy](https://www.codacy.com/){:target="\_blank"} is a code review tool that allows automatic analysis, code coverage tracking, and extensive reports, for you and your team to improve your code quality over time. + +Analysis reports displayed within Codacy dashboard: +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-report.png" +url="/images/testing/codacy/codacy-report.png" +alt="Codacy UI with coverage reports" +max-width="100%" +%} + +## Prerequisites for using Codacy + +* A simple [Codefresh pipeline, up and running]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/) +* A [Codacy account](https://www.codacy.com/){:target="\_blank"} (free, pro or enterprise) +* A testing tool added to your project that produces coverage reports + +Codacy supports over [30 different language integrations](https://docs.codacy.com/getting-started/supported-languages-and-tools/){:target="\_blank"}. Depending on the programming language used, it requires little to no set-up. + +You could try it out by cloning our [node example application](https://github.com/codefresh-contrib/codacy-sample-app){:target="\_blank"} that utilises [jest](https://jestjs.io/){:target="\_blank"}. 
+ +## Create an account with Codacy +Codacy has a free version, a pro version, and an on-premises version. The latter two have a free trial, which allows you to test all features over the course of two weeks. You can sign-up via GitHub, Bitbucket, or GitLab. + +When you log into Codacy for the first time, it will ask you to provide access to a repository. At this stage, Codacy will not download any code from your repository but merely access its names. You can then either provide access to selective repositories or your entire git account. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-add-repo.png" +url="/images/testing/codacy/codacy-add-repo.png" +alt="Add repository to codacy" +max-width="80%" +%} + +## Generate Project API token +To use Codacy, we need a project API token. To generate the token, select your project => go to settings => integrations => add integration => select “Project API”. Make sure that you select the API token from here and not your general project settings. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/create-api-token.png" +url="/images/testing/codacy/create-api-token.png" +alt="Create Project API token" +max-width="80%" +%} + +## Codefresh pipeline + +In case the project that you want to use Codacy in does not have a pipeline, [create a new pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/). + +{% include image.html +lightbox="true" +file="/images/testing/codacy/create-codacy-pipeline.png" +url="/images/testing/codacy/create-codacy-pipeline.png" +alt="Create Codacy Pipeline" +max-width="80%" +%} + +**Setting-up step** + +This step is based on our [TypeScript application](https://github.com/codefresh-contrib/codacy-sample-app){:target="\_blank"}. Before we set up our pipeline, we will add our Project API token as our environment variable. Note that we have specified our token in the variables section on the right, as displayed in the following screenshot. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-variable.png" +url="/images/testing/codacy/codacy-variable.png" +alt="Provide Codacy ENV variable" +max-width="80%" +%} + +Once the variable is called through the [Codefresh yml syntax]({{site.baseurl}}/docs/pipelines/variables/), it automatically uses the value provided within the variables section. If you are using this example as your pipeline, please delete anything in your pipeline. 
We can then add the following pipeline to our Inline YAML within the Workflow section in our UI: + +{% highlight yaml %} +{% raw %} +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "build" + - "test" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "anais-codefresh/codacy-sample-app" + # CF_BRANCH value is auto set when pipeline is triggered + # Learn more at codefresh.io/docs/docs/pipelines/variables/ + revision: "${{CF_BRANCH}}" + git: "github" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "anaisurlichs/codacy-sample-app" + working_directory: "${{clone}}" + tag: "${{CF_BRANCH_TAG_NORMALIZED}}" + dockerfile: "Dockerfile" + stage: "build" + registry: "dockerhub" + + tests: + title: "Running test" + type: "freestyle" + working_directory: '${{clone}}' + arguments: + image: 'node:15.2' + commands: + - "npm install --save-dev jest" + - "npm run test" + stage: "test" + + codacy: + title: "Pushing reports to codacy" + type: "freestyle" + working_directory: '${{clone}}' + arguments: + image: 'alpine:3.8' + commands: + - "export CODACY_PROJECT_TOKEN=${{CODACY_PROJECT_TOKEN}}" + - "wget -qO - https://coverage.codacy.com/get.sh | sh" + stage: "test" +{% endraw %} +{% endhighlight %} + +The last two steps, ’tests’ and ’codacy’, are used to run our tests, create our coverage reports and forward those to Codacy. If you are using your own project and existing pipeline, add those two steps to your pipeline. In case you are using your own application, make sure to adapt the commands within the test step to run the tests of your application. Additionally, ensure that both the ’repo’ and the ’image_name’ point to your integrations. + +Once you run the pipeline, the steps will create the coverage report and forwards it to Codacy. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-pipeline.png" +url="/images/testing/codacy/codacy-pipeline.png" +alt="Pipeline with Codacy step" +max-width="80%" +%} + +## View reports + +You can view the updated coverage reports within Codacy's UI every time you make a commit and/or run the Codefresh pipeline directly. + +{% include image.html +lightbox="true" +file="/images/testing/codacy/codacy-report.png" +url="/images/testing/codacy/codacy-report.png" +alt="Codacy UI Analysis Dashboard" +max-width="80%" +%} + +You can access further information on the coverage report by opening the file tab and accessing a specific file from your repository. 
+
+{% include image.html
+lightbox="true"
+file="/images/testing/codacy/file-analysis.png"
+url="/images/testing/codacy/file-analysis.png"
+alt="Codacy report details"
+max-width="90%"
+%}
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples)
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/)
+[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/)
+[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/)
+[Sonarqube Integration]({{site.baseurl}}/docs/testing/sonarqube-integration/)
diff --git a/_docs/example-catalog/ci-examples/codecov-testing.md b/_docs/example-catalog/ci-examples/codecov-testing.md
new file mode 100644
index 00000000..82e06f88
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/codecov-testing.md
@@ -0,0 +1,128 @@
+---
+title: "Codecov coverage reports"
+description: "How to forward coverage reports to Codecov"
+group: example-catalog
+sub_group: ci-examples
+toc: true
+---
+
+[Codecov](https://codecov.io/){:target="\_blank"} is a code analysis tool with which users can group, merge, archive, and compare coverage reports. Code coverage describes which lines of code were executed by the test suite and which ones were not. However, Codecov itself is not a testing tool.
+
+Analysis reports displayed within the Codecov dashboard:
+{% include image.html
+lightbox="true"
+file="/images/testing/codecov/analysis-report.png"
+url="/images/testing/codecov/analysis-report.png"
+alt="Codecov UI Analysis reports"
+max-width="50%"
+%}
+
+## Prerequisites for using Codecov
+
+* A simple [Codefresh pipeline up and running](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/)
+* A [Codecov account](https://codecov.io/){:target="\_blank"} (free or enterprise)
+* A testing tool added to your project that produces coverage reports
+
+Note that reports should ideally be written in .json, .xml, or .txt. To be sure, please double check that your coverage [report format](https://docs.codecov.io/docs/supported-report-formats){:target="\_blank"} is supported. You can find a variety of examples for different programming languages and suggestions for respective testing tools in the [Codecov docs](https://docs.codecov.io/docs/supported-languages){:target="\_blank"}.
+
+To test Codecov and follow along with the next section, you can clone our [Codecov sample app](https://github.com/codefresh-contrib/codecov-sample-app){:target="\_blank"}.
+
+## Create a Codecov account
+
+Once you sign up to Codecov, you can add a new repository. The UI will then provide you with an access token for the repository. While it is recommended that you take note of the token, you will still be able to access it within the **Settings** tab.
+
+{% include image.html
+lightbox="true"
+file="/images/testing/codecov/codecov-interface.png"
+url="/images/testing/codecov/codecov-interface.png"
+alt="Codecov Project Repository UI"
+max-width="50%"
+%}
+
+## Codefresh pipeline
+
+In this case, we divided testing and connecting Codefresh to Codecov into two different steps. If they can run within the same image, you could also combine them into a single step.
+
+**Testing step**
+Runs the command(s) for our testing tool. This will generate the code coverage report upon running the pipeline. Please refer to the Codecov documentation for [supported testing frameworks](https://docs.codecov.io/docs/supported-report-formats){:target="\_blank"}.
The [README of each example](https://docs.codecov.io/docs/supported-languages){:target="\_blank"} refers to possible frameworks that can be used.
+
+In general, ensure that the framework you use for testing and generating code coverage reports:
+* Produces code coverage reports in a supported file format
+* Is compatible with the programming language that your program is written in
+
+{% highlight yaml %}
+{% raw %}
+  test:
+    title: "Running test"
+    type: "freestyle" # Run any command
+    image: "node:14.19.0" # The image in which command will be executed
+    working_directory: "${{clone}}" # Running command where code cloned
+    commands:
+      - "npm install --save-dev jest"
+      - "npx jest --coverage"
+    stage: "test"
+{% endraw %}
+{% endhighlight %}
+
+**Codecov step**
+
+{% highlight yaml %}
+{% raw %}
+upload:
+    title: "Running test"
+    type: "freestyle" # Run any command
+    image: "node:14.19.0" # The image in which command will be executed
+    working_directory: "${{clone}}" # Running command where code cloned
+    commands:
+      - "ci_env=`curl -s https://codecov.io/env`"
+      - "npm install codecov -g"
+      - "codecov -t ${{CODECOV_TOKEN}} -f ./coverage/clover.xml"
+    stage: "upload"
+{% endraw %}
+{% endhighlight %}
+
+The commands run inside the Node Docker image:
+* `ci_env= curl -s https://codecov.io/env`: Collects the CI environment variables so that Codecov knows the build is running in Codefresh
+* `npm install codecov -g`: Installs the Codecov CLI
+* `codecov -t ${{CODECOV_TOKEN}} -f ./coverage/clover.xml`: Sets the Codecov access token (provided in the UI when we connect a new Git repository) and points to the file that contains our coverage report.
+
+Once you run the pipeline, the steps will create the coverage report and forward it to Codecov.
+
+{% include image.html
+lightbox="true"
+file="/images/testing/codecov/codecov-pipeline.png"
+url="/images/testing/codecov/codecov-pipeline.png"
+alt="Pipeline with codecov step"
+max-width="50%"
+%}
+
+## View reports
+
+You can view the updated coverage reports within the Codecov UI every time you make a commit and/or run the Codefresh pipeline directly.
+
+{% include image.html
+lightbox="true"
+file="/images/testing/codecov/codecov-report.png"
+url="/images/testing/codecov/codecov-report.png"
+alt="Pipeline with codecov step"
+max-width="50%"
+%}
+
+You can access further information on the coverage report by opening the link to the file displayed in the table.
+ +{% include image.html +lightbox="true" +file="/images/testing/codecov/codecov-report-details.png" +url="/images/testing/codecov/codecov-report-details.png" +alt="Codecov report details" +max-width="50%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Sonarqube Integration]({{site.baseurl}}/docs/testing/sonarqube-integration/) + diff --git a/_docs/example-catalog/ci-examples/coveralls-testing.md b/_docs/example-catalog/ci-examples/coveralls-testing.md new file mode 100644 index 00000000..dd060c20 --- /dev/null +++ b/_docs/example-catalog/ci-examples/coveralls-testing.md @@ -0,0 +1,221 @@ +--- +title: "Coveralls coverage reports" +description: "How to forward coverage reports to Coveralls" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Coveralls](https://coveralls.io/){:target="\_blank"} is a web service that allows users to track the code coverage of their application over time in order to optimize the effectiveness of their unit tests. This section details how coverage reports can be generated and forwarded to Coveralls with every Codefresh build. + +Analysis reports displayed within Coveralls dashboard: +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-sample-app.png" +url="/images/testing/coveralls/coveralls-sample-app.png" +alt="Coveralls UI Analysis reports" +max-width="80%" +%} + +## Prerequisites for using Coveralls + +* A simple [Codefresh pipeline up and running](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +* A [Coveralls account](https://coveralls.io/) (free or enterprise) -- Note that all open-source projects are free on Coveralls +* A testing tool added to your project that produces coverage reports + +Coveralls supports [22 different language integrations](https://docs.coveralls.io/about-coveralls){:target="\_blank"}. Each example provided in the official documentation suggests several coverage report tools that can be used in combination with Coveralls. + +You could try it out by cloning our [node example application](https://github.com/codefresh-contrib/coveralls-sample-app){:target="\_blank"} that utilises [jest](https://jestjs.io/){:target="\_blank"}. + +## Prepare your repository + +If you are using your own application as an example, you have to make a few modifications to the repository. Please have a look at the Coveralls example section for other languages. + +First, install Coveralls in your project: +{% highlight yaml %} +{% raw %} +npm install coveralls --save-dev +{% endraw %} +{% endhighlight %} + +Coveralls requires a [script](https://github.com/nickmerwin/node-coveralls){:target="\_blank"} that takes standard input and sends it to coveralls.io to report your code coverage. Depending on the framework that you are using, you will have to add a different script to your application. + +Any coverage reports can be forwarded that are within a [lcov data format](http://ltp.sourceforge.net/coverage/lcov/geninfo.1.php){:target="\_blank"} (including [mocha's LCOV reporter](https://www.npmjs.com/package/mocha-lcov-reporter){:target="\_blank"}). 
For this, we are going to set-up a “bin” folder, and within the folder a coveralls.js file that contains the following content: + +{% highlight yaml %} +{% raw %} +#!/usr/bin/env node + +'use strict'; + +const { handleInput } = require('..'); + +process.stdin.resume(); +process.stdin.setEncoding('utf8'); + +let input = ''; + +process.stdin.on('data', chunk => { + input += chunk; +}); + +process.stdin.on('end', () => { + handleInput(input, err => { + if (err) { + throw err; + } + }); +}); +{% endraw %} +{% endhighlight %} + +## Create a Coveralls account + +Once you sign-up to Coveralls, you can add a new repository. The UI will then provide you with an access token to the repository. Take note of the token since it will be required in the next sections. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/add-repository.png" +url="/images/testing/coveralls/add-repository.png" +alt="Coveralls repository" +max-width="80%" +%} + +## Codefresh pipeline + + +In case the project that you want to use Coveralls in does not have a pipeline, [create a new pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/). + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/create-coveralls-pipeline.png" +url="/images/testing/coveralls/create-coveralls-pipeline.png" +alt="Create Coveralls Pipeline" +max-width="80%" +%} + +Once you ’create’ the pipeline, a standard codefresh.yml file is generated with three steps: +* The first step will clone your repository; +* The second step will both, build and push your repository to the container registry that you have connected with Codefresh; +* And the third step currently does not do much. +In the next section, we will modify the testing step. + +**Testing step** + +The testing step requires three different environment variables to connect to Coveralls: +* `export COVERALLS_SERVICE_NAME="codefresh"` +* `export COVERALLS_GIT_BRANCH="insert the branch that you will be using with your application"` +* `export COVERALLS_REPO_TOKEN="insert the secret repo token from coveralls.io"` + +{% highlight yaml %} +{% raw %} + test: + title: "Running test" + type: "freestyle" # Run any command + image: "node:15.2" # The image in which command will be executed + working_directory: "${{clone}}" # Running command where code cloned + commands: + - "export COVERALLS_SERVICE_NAME=${{COVERALLS_SERVICE_NAME}}" + - "export COVERALLS_GIT_BRANCH=${{CF_BRANCH}}" + - "export COVERALLS_REPO_TOKEN=${{COVERALLS_REPO_TOKEN}}" + - "npm install --save-dev jest" + - "npm run test" + stage: "test" +{% endraw %} +{% endhighlight %} + +We specify several variables within this step. Those, which start with ’CF’ are [Codefresh-specific steps]({{site.baseurl}}/docs/pipelines/variables/) and the value is automatically provided by Codefresh once you run the pipeline. 
Our entire codefresh.yml will look as such: + +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "test" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "anais-codefresh/coveralls-sample-app" + # CF_BRANCH value is auto set when pipeline is triggered + # Learn more at codefresh.io/docs/docs/pipelines/variables/ + revision: "${{CF_BRANCH}}" + git: "github" + stage: "clone" + + build: + title: "Building Docker image" + type: "build" + image_name: "anaisurlichs/coveralls-sample-app" + working_directory: "${{clone}}" + tag: "${{CF_BRANCH_TAG_NORMALIZED}}" + dockerfile: "Dockerfile" + stage: "build" + registry: "dockerhub" + + test: + title: "Running test" + type: "freestyle" # Run any command + image: "node:15.2" # The image in which command will be executed + working_directory: "${{clone}}" # Running command where code cloned + commands: + - "export COVERALLS_SERVICE_NAME=${{COVERALLS_SERVICE_NAME}}" + - "export COVERALLS_GIT_BRANCH=${{CF_BRANCH}}" + - "export COVERALLS_REPO_TOKEN=${{COVERALLS_REPO_TOKEN}}" + - "npm install --save-dev jest" + - "npm run test" + stage: "test" +{% endraw %} +{% endhighlight %} + +Once you run the pipeline the steps will create the coverage report and forward it to Coveralls. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-pipeline.png" +url="/images/testing/coveralls/coveralls-pipeline.png" +alt="Pipeline with Coveralls step" +max-width="80%" +%} + +## View reports + +You can view the updated coverage reports within Coveralls UI every time you make a commit and/or run the Codefresh pipeline directly. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-sample-app.png" +url="/images/testing/coveralls/coveralls-sample-app.png" +alt="Coveralls UI Analysis reports" +max-width="80%" +%} + +You can access further information on the coverage report by opening the link to the file displayed in the table. + +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-specific-report.png" +url="/images/testing/coveralls/coveralls-specific-report.png" +alt="Coveralls report details" +max-width="80%" +%} + +And view a the code coverage of a specific file: +{% include image.html +lightbox="true" +file="/images/testing/coveralls/coveralls-coverage.png" +url="/images/testing/coveralls/coveralls-coverage.png" +alt="Coveralls report details" +max-width="80%" +%} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Sonarqube Integration]({{site.baseurl}}/docs/testing/sonarqube-integration/) diff --git a/_docs/example-catalog/ci-examples/cpp-cmake.md b/_docs/example-catalog/ci-examples/cpp-cmake.md new file mode 100644 index 00000000..11f8e963 --- /dev/null +++ b/_docs/example-catalog/ci-examples/cpp-cmake.md @@ -0,0 +1,125 @@ +--- +title: "Compile and test a C++ application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any C/C++ application very easily as both `gcc` and `g++` are already offered in Dockerhub. 
There is also another example available with [C and make]({{site.baseurl}}/docs/example-catalog/ci-examples/c-make). + +## The example C++ project + +You can see the example project at [https://github.com/codefresh-contrib/cpp-sample-app](https://github.com/codefresh-contrib/cpp-sample-app){:target="\_blank"}. The repository contains a C++ starter project with a `CMakeLists.txt` file: + +* `cmake .` creates the makefiles. +* `make test` runs unit tests +* `make` compiles the code + +The project is also using the [boost testing libraries](https://www.boost.org/){:target="\_blank"}. + +## Cmake, g++ and Docker + +Creating a CI/CD pipeline for C is very easy, because Codefresh can run any [gcc image](https://hub.docker.com/_/gcc/){:target="\_blank"} that you wish. Gcc docker images already contain the `make` utility but not the the `cmake` one. Therefore we will first create a Dockerfile that has `g++`, cmake and the boost libraries. You can follow the same pattern for other development tools that you use. + + +Here is the Dockerfile: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM gcc:9.2 + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update && apt-get install -y cmake libgtest-dev libboost-test-dev && rm -rf /var/lib/apt/lists/* + +CMD ["cmake"] + +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. Starts from the GCC image +1. Installs cmake and boost +1. Sets cmake as the default command + +## Create a CI pipeline for C++ applications + +We can now use the custom Docker image in order to compile/test the C++ application: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/cc/cpp-cmake-pipeline.png" +url="/images/learn-by-example/cc/cpp-cmake-pipeline.png" +alt="Compiling a C++ application in a pipeline" +caption="Compiling a C++ application in a pipeline" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/cpp-sample-app/blob/master/codefresh.yml){:target="\_blank"} that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - prepare + - build +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'codefresh-contrib/cpp-sample-app' + revision: master + git: github + build_dev_image: + title: Building Dev Image + stage: prepare + type: build + image_name: cmake + working_directory: ./dev/ + tag: 'latest' + dockerfile: Dockerfile + create_makefiles: + title: Create Makefiles + stage: prepare + image: ${{build_dev_image}} + commands: + - cmake . + compile_my_sources: + title: Compile + stage: build + image: ${{build_dev_image}} + commands: + - make + run_my_tests: + title: Test + stage: build + image: ${{build_dev_image}} + commands: + - make test +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. clones the source code +1. Creates a development docker image that has g++, cmake and boost +1. Runs cmake on the source code to create the make files +1. Compiles the source code +1. Runs unit tests + +You can add additional tools in the pipeline by extending the Dockerfile mentioned in the previous section. You can also +change the version of Gcc/g++ by starting from a different public or private Docker image. 
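+
+As a sketch, one way to make the compiler version configurable is to pass it as a build argument when building the development image. This assumes you add an `ARG GCC_VERSION` line above `FROM` in the Dockerfile shown earlier (the example Dockerfile currently hard-codes `gcc:9.2`):
+
+{% highlight yaml %}
+{% raw %}
+  build_dev_image:
+    title: Building Dev Image
+    stage: prepare
+    type: build
+    image_name: cmake
+    working_directory: ./dev/
+    tag: 'latest'
+    dockerfile: Dockerfile
+    build_arguments:
+      # requires "ARG GCC_VERSION" before FROM in the Dockerfile, e.g. FROM gcc:$GCC_VERSION
+      - GCC_VERSION=12.2
+{% endraw %}
+{% endhighlight %}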
+ + +## Related articles +[C example]({{site.baseurl}}/docs/example-catalog/ci-examples/c-make/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/codefresh-yaml/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/decryption-with-mozilla-sops.md b/_docs/example-catalog/ci-examples/decryption-with-mozilla-sops.md new file mode 100644 index 00000000..d091ca24 --- /dev/null +++ b/_docs/example-catalog/ci-examples/decryption-with-mozilla-sops.md @@ -0,0 +1,177 @@ +--- +title: "Decrypt with Mozilla SOPS" +description: "Store secrets in your repository and decrypt them using Mozilla SOPS" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/administration/create-a-codefresh-account/) +- A public and private GnuGP key pair +- A credentials yaml, that is encrypted using Mozilla SOPS, and stored in your repository + +## Example Java application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/mozilla-sops-app){:target="\_blank"}. + +The example application retrieves the system variable "password," from the pipeline and uses it to authenticate to a Redis database, but you are free to use any type of database of your choosing. + +```java + String password = System.getenv("password"); + String host = System.getProperty("server.host"); + + RedisClient redisClient = new RedisClient( + RedisURI.create("redis://" + password + "@" + host + ":6379")); + RedisConnection connection = redisClient.connect(); +``` + +Also in the example application is a simple unit test that ensures we are able to read and write data to the database. 
+ +An encrypted credentials file is stored in the repository (along with a public key): + +`credentials.yaml` +```yaml +password: ENC[AES256_GCM,data:Jsth2tY8GhLgj6Jct27l,iv:3vcKVoD5ms29R5SWHiFhDhSAvvJTRzjn9lA6woroUQ8=,tag:OjkLvcHxE4m5RSCV7ej+FA==,type:str] +sops: + kms: [] + gcp_kms: [] + azure_kv: [] + lastmodified: '2020-03-30T19:12:49Z' + mac: ENC[AES256_GCM,data:jGMTkFhXjgGMdWBpaSWjGZP6fta3UuYjEsnqziNELQZ2cLScT9v+GKg/c8iJYv1Gfiz3aw4ivYYrWzwmZehIbPHaw3/XBv/VRCQhzRWYKaf6pPFUXIS7XALSf9L9VbGOXL/CGPRae3t3HpaOor+knd6iQk2WR3K9kSeib4RBSCE=,iv:WSP8hBwaBv3ymTGltBOaVVC1sT08IG4hwqESlG8rN9w=,tag:3hZvCuql+ASWe/Mm5Bl7xg==,type:str] + pgp: + - created_at: '2020-03-30T19:12:49Z' + enc: | + -----BEGIN PGP MESSAGE----- + hQGMA9TqgBq6RQVRAQv/UouNaHfxkJ5PwXLvda97Fgj/2ew2VXPAlAnLvoGvTsb2 + U4GXcaE7c4mYf7wSKF9k/F0FZTUEnd3CRji/OqjrNyvj5zI/9KGRABCKvzjsx+ZG + JolVnDifHl78Mor1CUPQ4JXasHKbVSlNLMGgDHIsvpeC7f7pIi8YDUDIa3/zXhFK + jcKzz4nlrW1Ph8zukmQk49Xvv6+DFj2NTptOB3U6mh79RCdnyCSRHxA3f0X00Pi5 + g0p5x46S5E04uC2wXrZv8i/gyQbLHxwjmdbLq+P1Peu4/i9eSZZOpx0mc1KJ2mjr + oKRvgnUFz3xuYrSNzjC1vM01UbuSytlwx+S3J7VVLPSZRso1sbgv2+ylUOAHS+gZ + 64uL0j/BZrF4wZI8y8zr0nJ6cZLiiF3LeXhfcuWJJ7+5p1OBEvfO+sWorLahIZTw + pogYPDpz4rGnrJRKBkNsVlYuUG8aNerIfhEBr6n//VJtt7QXTEXraLCTt4a6z/Fl + R6YSeNCKWQlURrTfm4Kv0lwBzMTLUb+Fg3HO8ShhiE9/2dKTSJkRJMVXRDp22Fm1 + vO/wMFUjg6Dkrj1LVqQ9zcXc5QElgc4mF/V7SazacbQ7/g67tVtUrTit9LXgR9A0 + k7wU5iT5oWLJtWwpkA== + =Il2p + -----END PGP MESSAGE----- + fp: C70833A85193F72C2D72CB9DBC109AFC69E0185D + encrypted_regex: password + version: 3.5.0 +``` +You cannot run the application locally, as it needs to run in the pipeline in order to use our environment variables to connect. + +## Create pipeline + +The pipeline contains four stages: + +- A stage for cloning +- A stage for importing the private/public keypair +- A stage for decrypting the credentials file +- A stage for packaging our jar and running unit tests + +{% include image.html +lightbox="true" +file="/images/examples/secrets/mozilla-sops-pipeline.png" +url="/images/examples/secrets/mozilla-sops-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +First, you need to add a pipeline variable, `PRIV_KEY`, for your private key. You can do that in the UI by navigating to the in-line YAML editor and to the right-hand side, you will find the **Variables** tab: + +{% include image.html +lightbox="true" +file="/images/examples/secrets/mozilla-sops-pipeline-vars.png" +url="/images/examples/secrets/mozilla-sops-pipeline-vars.png" +alt="Mozilla SOPS Pipeline Variables" +caption="Pipeline Variables" +max-width="90%" +%} + +You can also add this [directly in the YAML itself]({{site.baseurl}}/docs/how-to-guides/migrating-from-travis-ci/#environment-variables). + +Here is the entire pipeline: + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +# More examples of Codefresh YAML can be found at +# https://codefresh.io/docs/docs/example-catalog/ci-examples/ + +version: "1.0" +# Stages can help you organize your steps in stages +stages: + - "clone" + - "import" + - "decrypt" + - "package" + +steps: + clone: + title: "Cloning repository..." + type: "git-clone" + stage: "clone" + arguments: + repo: "codefresh-contrib/mozilla-sops-app" + revision: "master" + + import_keys: + title: "Importing gpg keys..." 
+ type: "freestyle" + stage: "import" + working_directory: '${{clone}}' + arguments: + image: "vladgh/gpg" + commands: + - gpg --import public.key + - echo -e "${{PRIV_KEY}}" > private.key + - gpg --allow-secret-key-import --import private.key + + decrypt_password: + title: "Decrypting password..." + type: "freestyle" + working_directory: "${{clone}}" + stage: "decrypt" + arguments: + image: "mozilla/sops" + commands: + - cp -r /codefresh/volume/.gnupg /root/.gnupg + - cf_export password=$(sops --decrypt --extract '["password"]' credentials.yaml) + + package_jar: + title: "Packaging jar and running unit tests..." + working_directory: ${{clone}} + stage: "package" + arguments: + image: "maven:3.5.2-jdk-8-alpine" + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dserver.host=my-redis-db-host clean package + services: + composition: + my-redis-db-host: + image: 'redis:4-alpine' + command: 'redis-server --requirepass $password' + ports: + - 6379 +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the main repository through a [git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Uses a GPG image and imports the public and private key pair through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Decrypts the credentials file through a different freestyle step. At this step, SOPS looks for the .gnupg directory (where the keyring is stored) under /root. We need to copy it from the [Codefresh Volume]({{site.baseurl}}/docs/pipelines/steps/freestyle/#custom-volumes), as /root is not saved between containers. +4. The last step, `package_jar`, does a few special things to take note of: + - Spins up a [Service Container]({{site.baseurl}}/docs/pipelines/service-containers/) running Redis on port 6379 , and sets the password to the database using our exported environment variable + - Sets `maven.repo.local` to cache Maven dependencies into the local codefresh volume to [speed up builds]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/#caching-the-maven-dependencies) + - Runs unit tests and packages the jar. Note how you can directly refer to the service container's name (`my-redis-db-host`) when we set `server.host` + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Vault secrets in pipelines]({{site.baseurl}}/docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline/) + diff --git a/_docs/example-catalog/ci-examples/django.md b/_docs/example-catalog/ci-examples/django.md new file mode 100644 index 00000000..fcc3e75d --- /dev/null +++ b/_docs/example-catalog/ci-examples/django.md @@ -0,0 +1,174 @@ +--- +title: "Python Django example" +description: "Create Docker images for Python applications" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/django/ + - /docs/python/django/ +toc: true +--- +Codefresh can work with Python projects using any of the popular frameworks. In this page we will see Django. For a Flask example see the [quick start guide]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/). + +## The example Django project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-python-django](https://github.com/codefreshdemo/cf-example-python-django){:target="\_blank"}. 
The repository contains a Django starter project with the following commands: + +* `pip install -r requirements.txt` install dependencies. +* `python -m unittest composeexample.utils` runs unit tests. +* `python manage.py runserver 0.0.0.0:8000` to start the application locally. + + +Once launched the application presents the Django starter page at localhost:8000. + +## Django and Docker + +The easiest way to build a Django application is with a Dockerfile that contains everything. This is very convenient as the Docker image can contain everything you need (i.e. app plus test frameworks) inside a pipeline. + + +Here is the Dockerfile: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM python:3.6-slim + +ENV PYTHONDONTWRITEBYTECODE 1 +ENV PYTHONUNBUFFERED 1 +RUN mkdir /code +WORKDIR /code +RUN pip install --upgrade pip +COPY requirements.txt /code/ + +RUN pip install -r requirements.txt +COPY . /code/ + +EXPOSE 8000 + +CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"] +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. Starts from the Python image +1. Sets some environment variables +1. Copies the dependencies file inside the container +1. Upgrades pip and installs all dependencies +1. Copies the rest of the source code +1. Starts the Django app + +You can build this image locally on your workstation and then launch it to test the application. + +### Create a CI pipeline for Python/Django + +Creating a CI/CD pipeline for Django is very easy if you already have the Dockerfile with all required dependencies. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/python/python-build-test.png" +url="/images/learn-by-example/python/python-build-test.png" +alt="Creating a Docker image for Python" +caption="Creating a Docker image for Python" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - test +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefreshdemo/cf-example-python-django' + revision: master + git: github + build_my_image: + title: Building Docker Image + stage: build + type: build + image_name: my-django-image + working_directory: ./ + tag: master + dockerfile: Dockerfile + test_my_image: + title: Running unit tests + stage: test + image: '${{build_my_image}}' + commands: + - python -m unittest composeexample.utils +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, creates a Docker image and then uses the same image to run unit tests. Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. + + +### Running tests before building the docker image + +Sometimes if you have a complex application you might want to run integration tests (or other Python commands), *before* building the Docker image. This scenario is also supported natively by Codefresh. 
+ + +{% include image.html +lightbox="true" +file="/images/learn-by-example/python/python-test-build.png" +url="/images/learn-by-example/python/python-test-build.png" +alt="Building the image after tests have run" +caption="Building the image after tests have run" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefreshdemo/cf-example-python-django/blob/master/codefresh-build-after-test.yml){:target="\_blank"} builds the docker image after tests have already executed. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefreshdemo/cf-example-python-django' + revision: master + git: github + test_the_code: + title: Run unit tests + stage: test + image: python:3.6-slim + commands: + - pip install -r requirements.txt --cache-dir=/codefresh/volume/pip-cache + - python -m unittest composeexample.utils + build_my_image: + title: Building Docker Image + stage: build + type: build + image_name: my-django-image + working_directory: ./ + tag: full + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Codefresh is smart enough that [caches automatically]({{site.baseurl}}/docs/pipelines/pipeline-caching/) for us the workspace of a build (`/codefresh/volume`). This works great for build tools that keep their cache in the project folder, but not for pip which keeps its cache externally (e.g. `~/.cache/pip`). By changing the location of the Pip cache on the project folder (the `pip-cache` name is arbitrary) we make sure that Codefresh will cache automatically the Pip libraries resulting in much faster builds. + +## Related articles +[Python examples]({{site.baseurl}}/docs/example-catalog/ci-examples/python/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/dotnet.md b/_docs/example-catalog/ci-examples/dotnet.md new file mode 100644 index 00000000..173a9569 --- /dev/null +++ b/_docs/example-catalog/ci-examples/dotnet.md @@ -0,0 +1,115 @@ +--- +title: "C# on .NET Core" +description: "How to build a C# project in Codefresh" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any .NET core application very easily as there are official [Docker images from Microsoft](https://hub.docker.com/_/microsoft-dotnet-core){:target="\_blank"}. + +## The example C# project + +You can see the example project at [https://github.com/dotnet-architecture/eShopOnWeb](https://github.com/dotnet-architecture/eShopOnWeb){:target="\_blank"}. The repository contains a C# Web project with 3 kinds of tests. It has different tags for each version of .NET Core and has + +* a `docker-compose.yml` file for local development +* a `tests` directory with all types of tests +* a Dockerfile at `/src/Web` + +There are also previous releases at [https://github.com/dotnet-architecture/eShopOnWeb/releases](https://github.com/dotnet-architecture/eShopOnWeb/releases){:target="\_blank"}. + +### Create a CI pipeline for C# applications + +Creating a CI/CD pipeline for C# is very easy, because Codefresh can run any SDK image version that you wish. 
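+
+For example, moving the test steps to a newer SDK is just a change of the image tag. The snippet below is a sketch; the `mcr.microsoft.com/dotnet/sdk:6.0` image is an assumption and would also need a matching branch of the sample project:
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  my_unit_tests:
+    title: Unit tests
+    stage: test
+    image: mcr.microsoft.com/dotnet/sdk:6.0   # assumed newer SDK tag
+    working_directory: './tests/UnitTests/'
+    commands:
+      - dotnet test
+{% endraw %}
+{% endhighlight %}
+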
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/dotnet/dotnetcore-pipeline.png" +url="/images/learn-by-example/dotnet/dotnetcore-pipeline.png" +alt="Compiling a C# application in a pipeline" +caption="Compiling a C# application in a pipeline" +max-width="80%" +%} + +Here is the full pipeline that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: checkout + type: git-clone + repo: 'dotnet-architecture/eShopOnWeb' + revision: 'netcore3.0' + git: github-1 + my_unit_tests: + title: Unit tests + stage: test + image: mcr.microsoft.com/dotnet/core/sdk:3.0 + working_directory: './tests/UnitTests/' + commands: + - dotnet test + my_integration_tests: + title: Integration tests + stage: test + image: mcr.microsoft.com/dotnet/core/sdk:3.0 + working_directory: './tests/IntegrationTests/' + commands: + - dotnet test + my_functional_tests: + title: Fuctional tests + stage: test + image: mcr.microsoft.com/dotnet/core/sdk:3.0 + working_directory: './tests/FunctionalTests/' + commands: + - dotnet test + my_app_docker_image: + title: Building Docker Image + type: build + stage: build + image_name: dotnetcore-eshop + working_directory: ./ + tag: latest + dockerfile: src/Web/Dockerfile +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. clones the source code +1. Uses the official `mcr.microsoft.com/dotnet/core/sdk:3.0` image to run unit/integration/functional tests in 3 different folders +1. Builds the application docker image using the root folder as Docker context but with the Dockerfile located at `./src/Web` + + + + + +## Related articles +[C/C++ examples]({{site.baseurl}}/docs/learn-by-example/cc/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + + + + diff --git a/_docs/example-catalog/ci-examples/fan-in-fan-out.md b/_docs/example-catalog/ci-examples/fan-in-fan-out.md new file mode 100644 index 00000000..a8c2b3d1 --- /dev/null +++ b/_docs/example-catalog/ci-examples/fan-in-fan-out.md @@ -0,0 +1,204 @@ +--- +title: "Fan-out-fan-in pipeline" +description: "Use parallel mode to fan-in and fan-out your step dependencies" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +In pipelines, the concept of fan-in/fan-out is depicted in the diagram below. This pipeline offers parallel sub-flows within the same pipeline. Fan-out refers to spreading a task to multiple destinations in parallel, and fan-in is the opposite, where we spread multiple tasks to the same destination. + +{% include image.html +lightbox="true" +file="/images/examples/unit-tests/parallel-pipeline-examples.png" +url="/images/examples/unit-tests/parallel-pipeline-examples.png" +alt="parallel pipeline diagraam" +caption="Parallel Mode Diagram" +max-width="100%" +%} + +As you can see in the diagram, Step1 fans out to Step2 and Step4 (which run in parallel), while Step3 and Step4 fan-in to Step5. 
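+
+In Codefresh YAML this shape is modeled with `mode: parallel` plus `when/steps` conditions on each step. Below is a stripped-down sketch of the diagram, with placeholder `echo` commands standing in for the real work (the full pipeline for the sample application appears later in this article):
+
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+mode: parallel
+steps:
+  Step1:
+    image: alpine
+    commands:
+      - echo "start"
+  Step2:
+    image: alpine
+    commands:
+      - echo "fan out, branch A"
+    when:
+      steps:
+        - name: Step1
+          on:
+            - success
+  Step4:
+    image: alpine
+    commands:
+      - echo "fan out, branch B"
+    when:
+      steps:
+        - name: Step1
+          on:
+            - success
+  Step3:
+    image: alpine
+    commands:
+      - echo "runs after Step2"
+    when:
+      steps:
+        - name: Step2
+          on:
+            - success
+  Step5:
+    image: alpine
+    commands:
+      - echo "fan in"
+    when:
+      steps:
+        - name: Step3
+          on:
+            - success
+        - name: Step4
+          on:
+            - success
+{% endraw %}
+{% endhighlight %}
+
+Step3 waits on Step2, and the final step waits on both Step3 and Step4, exactly as in the diagram.
+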
+ +You can achieve parallelism in your Codefresh pipelines by using the following: + +- Simple parallel jobs ([inserting parallel steps into a sequential pipeline]({{site.baseurl}}/docs/pipelines/advanced-workflows/#inserting-parallel-steps-in-a-sequential-pipeline)) +- [Full parallel mode]({{site.baseurl}}/docs/pipelines/advanced-workflows/#parallel-pipeline-mode) +- Fan-out/fan-in parallel pipelines, as described in this article + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) + +## Example project + +You can find the example Spring boot application on [GitHub](https://github.com/codefresh-contrib/fan-out-fan-in-sample-app.git){:target="\_blank"}. It is a simple Hello World application with several different types of tests we will use to run using Codefresh's parallel mode. + +## Create the pipeline + +Our pipeline will have five stages: setup, start, web-tests, smoke, and end: + +{% include image.html +lightbox="true" +file="/images/examples/unit-tests/fan-in-fan-out-pipeline.png" +url="/images/examples/unit-tests/fan-in-fan-out-pipeline.png" +alt="fan-in-fan-out UI pipeline view" +caption="Codefresh UI Pipeline View" +max-width="100%" +%} + +You should be able to copy and paste this YAML in the in-line editor in the Codefresh UI. It will automatically clone the project for you. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +mode: parallel +stages: +- setup +- start +- web-tests +- smoke +- end +steps: + Clone: + title: Cloning main repository... + stage: setup + type: git-clone + arguments: + repo: codefresh-contrib/fan-out-fan-in-sample-app + git: github + revision: master + Build_image: + title: Building Docker Image... + type: build + stage: setup + working_directory: ${{Clone}} + arguments: + image_name: spring-backend + tag: latest + dockerfile: Dockerfile + when: + steps: + - name: Clone + on: + - success + Step1: + title: Running unit tests... + stage: start + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="unit" test + when: + steps: + - name: Build_image + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step2: + title: Running web mock test... + stage: web-tests + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="web-mock" test + when: + steps: + - name: Step1 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step3: + title: Running smoke test... + stage: smoke + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="smoke" test + when: + steps: + - name: Step2 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step4: + title: Running web layer tests... + stage: web-tests + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="web-layer" test + when: + steps: + - name: Step1 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 + Step5: + title: Running integration tests... 
+ stage: end + working_directory: ${{Clone}}/complete + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dgroups="integration" test + when: + steps: + - name: Step3 + on: + - success + - name: Step4 + on: + - success + services: + composition: + spring_backend: + image: ${{Build_image}} + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +>Note the special use of `mode: parallel` declared at the root of our yaml. This syntax makes the pipeline use the full parallel mode. +The order of your build steps doesn't matter in this case, each step is executed according to its [condition]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/). + +- Step1 (unit tests) fans out to Step2 and Step4 (web tests), which run in parallel +- Step3 (smoke tests) does not execute until Step2 is completed +- Step3 and Step4 fan in to the final step, Step5 (integration tests) + +This pipeline consists of the following: + +1. Clones the main repository through a [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Builds the cloned source code into a Docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +3. Runs [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that: + - Run unit tests according to their respective @Tags + - Use the image built in the second step as a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Parallel pipeline mode]({{site.baseurl}}/docs/pipelines/advanced-workflows/#parallel-pipeline-mode) + diff --git a/_docs/example-catalog/ci-examples/general.md b/_docs/example-catalog/ci-examples/general.md new file mode 100644 index 00000000..3cec98bc --- /dev/null +++ b/_docs/example-catalog/ci-examples/general.md @@ -0,0 +1,16 @@ +--- +title: "General" +description: "" +group: example-catalog +redirect_from: + - /docs/learn-by-example/general/ +toc: true +--- +This section contains Codefresh examples based on other technologies. 
+{% comment %} +links not available in base documentation +- [How to trigger the another pipeline using cf-cli](doc:how-to-trigger-another-pipeline-using-cf-cli) +- [How to run composition using cf-cli](doc:how-to-run-composition-using-cf-cli-1) +- [How to spin up image using cf-cli](doc:how-to-spin-up-image-using-cf-cli) +{% endcomment %} +- [Selenium test]({{site.baseurl}}/docs/learn-by-example/general/selenium-test/) diff --git a/_docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process.md b/_docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process.md new file mode 100644 index 00000000..b071c29b --- /dev/null +++ b/_docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process.md @@ -0,0 +1,77 @@ +--- +title: "Use Git Hash in CI" +description: "Get short SHA ID and use it in a CI Process" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/how-to-guides/ + - /docs/how-get-first-8-digits-of-sha/ +toc: true +old_url: /docs/how-get-first-8-digits-of-sha +--- + +## Get the short SHA ID +Add the following variable to your script: + +{% highlight text %} +{% raw %} +${{CF_SHORT_REVISION}} +{% endraw %} +{% endhighlight %} + + +## Use the SHA ID in a tag + + +{% highlight text %} +{% raw %} +tag: ${{CF_SHORT_REVISION}} +{% endraw %} +{% endhighlight %} + + +## YAML example + +{% highlight yaml %} +{% raw %} +step-name: + type: build + description: Free text description + working-directory: ${{clone-step-name}} + dockerfile: path/to/Dockerfile + image-name: owner/new-image-name + tag: ${{CF_SHORT_REVISION}} + build-arguments: + - key=value + fail-fast: false +{% endraw %} +{% endhighlight %} + +## Result in [hub.docker](https://hub.docker.com){:target="_blank"} + +{% include image.html +lightbox="true" +file="/images/examples/git/sha-id-docker-hub.png" +url="/images/examples/git/sha-id-docker-hub.png" +alt="SHA ID in Docker Hub" +caption="SHA ID in Docker Hub" +max-width="60%" +%} + +## Result in Codefresh + +{% include image.html +lightbox="true" +file="/images/examples/git/sha-id-codefresh.png" +url="/images/examples/git/sha-id-codefresh.png" +caption="SHA ID in Codefresh" +alt="SHA ID in Codefresh" +max-width="60%" +%} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/git-checkout-custom.md b/_docs/example-catalog/ci-examples/git-checkout-custom.md new file mode 100644 index 00000000..9a17e018 --- /dev/null +++ b/_docs/example-catalog/ci-examples/git-checkout-custom.md @@ -0,0 +1,106 @@ +--- +title: "Using custom Git commands" +description: "Manually clone Git repositories" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/git-clone-private-repository-using-freestyle-step/ + - /docs/example-catalog/ci-examples/git-clone-private-repository-using-freestyle-step/ +toc: true +--- + +>Manually running Git commands is an advanced technique. For most use cases you should use the [native Git checkout]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) offered by Codefresh. + +For complex cloning, you can still use custom clone commands in a freestyle step. 
In this case, +you lose the native Codefresh integration such as Git authentication and automatic workdir setup. Use custom clone commands only as a last resort. + + +## Cloning with the Git executable + +It is very easy to run custom Git commands in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). Pass any parameters to the Git clone step as you would pass them on your local workstation. + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomClone: + title: Performing swallow clone + image: alpine/git:latest + commands: + - rm -rf ruby-on-rails-sample-app + - git clone --depth 1 https://github.com/codefresh-contrib/ruby-on-rails-sample-app.git + PrintFileList: + title: 'Listing files' + image: alpine:latest + working_directory: './ruby-on-rails-sample-app' + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +Notice the `rm` command before the clone step. This makes sure that every time the pipeline runs, the `git clone` step is implemented in an empty directory. Otherwise the `git clone` command will fail (Git will refuse to clone on an existing directory). + +You can enter your own Git username/password or [reuse the credentials]({{site.baseurl}}/docs/pipelines/steps/git-clone/#reuse-a-git-token-from-codefresh-integrations) from the Codefresh integration. + +## Manually running Git commands + +Once you understand that you can manually run Git commands in Codefresh pipelines, it is easy to see that any Git workflow is possible. +Here is an example where an application is packaged in a Docker container, after merging `master` to a specific branch. + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomClone: + title: Performing swallow clone + image: alpine/git:latest + commands: + - rm -rf example_nodejs_postgres + - git clone https://github.com/kostis-codefresh/example_nodejs_postgres + - cd example_nodejs_postgres + - git checkout experiment1 + - git merge master + - git status + myDockerImage: + title: 'BuildingDockerImage' + type: build + dockerfile: Dockerfile + working_directory: './example_nodejs_postgres' + image_name: my-app-image + tag: from-master-branch +{% endraw %} +{% endhighlight %} + +If there are any errors with the merge, the pipeline fails automatically. Codefresh automatically stops any pipeline that shows an error in a step. + +## Other forms of cloning + +There is nothing special about running Git it in a freestyle step. In fact, you can check out code with any other command that you would run locally in your terminal. + +Here is an example with Golang. + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomClone: + title: Download example + image: golang:1.11-alpine + commands: + - apk add --no-cache git + - go get github.com/golang/example/hello +{% endraw %} +{% endhighlight %} + +If you run this pipeline you will see git used as part of the `go get` mechanism. + +More examples such as using SSH keys and working with GIT submodules can be found in the [clone step documentation]({{site.baseurl}}/docs/pipelines/steps/git-clone/). 
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Native Git checkout]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) +[Native Git integration]({{site.baseurl}}/docs/integrations/git-providers/) +[Freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) +[Git Clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) + diff --git a/_docs/example-catalog/ci-examples/git-checkout.md b/_docs/example-catalog/ci-examples/git-checkout.md new file mode 100644 index 00000000..81cc9b23 --- /dev/null +++ b/_docs/example-catalog/ci-examples/git-checkout.md @@ -0,0 +1,203 @@ +--- +title: "Check out Git repositories" +description: "Use the Codefresh native GIT integration" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh has native support for Git repositories and Git triggers. First you need to set up a [Git integration]({{site.baseurl}}/docs/integrations/git-providers/) (your administrator might also have done this for you already). + +{% include image.html +lightbox="true" +file="/images/integrations/git/git-integrations.png" +url="/images/integrations/git/git-integrations.png" +alt="GIT integrations" +caption="GIT integrations" +max-width="70%" +%} + +You can add a new integration for any cloud provider or even [on-premises]({{site.baseurl}}/docs/reference/behind-the-firewall/) ones. By default you will also have a provider set up if you used one for Codefresh signup (GitHub, GitLab or Bitbucket). + +For each Git Integration, make sure that you note down its name, as you will use in your pipeline inside a [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step. + + +## Cloning a specific repository + +The simplest way to clone using your git provider is by specifying the exact repository details. +Here is a pipeline that clones a git repository and creates a Docker image from a Dockerfile: + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: kostis-codefresh/example_nodejs_postgres + revision: master + git: github-1 + myDockerImage: + title: 'Building My Docker Image' + type: build + dockerfile: Dockerfile + image_name: my-app-image + tag: from-master-branch +{% endraw %} +{% endhighlight %} + +This syntax is very simple to use, but it has the disadvantage that ties your pipeline to a specific repository. This makes +the pipeline impossible to re-use among different micro-services (that are built in a similar manner). + +## Cloning the triggered repository (recommended) + +The proper way to use git-clone steps is to make them trigger specific. Instead of hard-coding the git repository that is checked-out, it is best to checkout the same one that [triggered the pipeline]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/). This is what you want in most scenarios anyway. + +This can be achieved by using Codefresh [variables]({{site.baseurl}}/docs/pipelines/variables/) to refer to the trigger. +Here is the same pipeline as before, written in a generic way: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' 
+ type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: github-1 + myDockerImage: + title: 'Building My Docker Image' + type: build + dockerfile: Dockerfile + image_name: my-app-image + tag: ${{CF_BRANCH_TAG_NORMALIZED}} +{% endraw %} +{% endhighlight %} + +The big advantage of this pipeline is that it can be reused for *ALL* your projects that follow the same pattern of having a Dockerfile in the root of the git repository. + +{% include image.html +lightbox="true" +file="/images/examples/checkout/add-new-microservice.png" +url="/images/examples/checkout/add-new-microservice.png" +alt="Reusing a pipeline between microservices" +caption="Reusing a pipeline between microservices" +max-width="50%" +%} + +Thus you can have a single pipeline and when you want to enable it for a new micro-service you can simply add a new [git trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) for it. + +You still run the pipeline manually if you wish. In this case you will be asked which trigger you want to "simulate" so that the variable pipelines are correctly replaced by Codefresh. + +{% include image.html +lightbox="true" +file="/images/examples/checkout/simulate-trigger.png" +url="/images/examples/checkout/simulate-trigger.png" +alt="Simulating a GIT trigger" +caption="Simulating a GIT trigger" +max-width="50%" +%} + +This is the recommended way of creating re-usable pipelines in Codefresh. + +## Cloning a repository with Codefresh Runner +If you have the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) installed, you need to use +the fully qualified path of the Git repository: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: https://github-internal.example.com/my-username/my-app + revision: '${{CF_REVISION}}' + git: my-internal-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +More details can be found in the [private Git instructions page]({{site.baseurl}}/docs/reference/behind-the-firewall/#checking-out-code-from-a-private-git-repository). + + +## Working inside the cloned directory + +Normally each [pipeline step]({{site.baseurl}}/docs/pipelines/steps/) in Codefresh can be named as you want. Specifically, for the Git-clone step however the name `main_clone` is special. + +If you name your clone step as `main_clone`, Codefresh automatically changes the working directory for all the next (non Git-clone) pipeline steps, to be the same as the project that was just checked out. This only applies to [built-in]({{site.baseurl}}/docs/pipelines/steps/#built-in-steps) Codefresh steps and not [custom plugins]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin). + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/checkout.png" +url="/images/pipeline/introduction/checkout.png" +alt="Checkout structure" +caption="Checkout structure" +max-width="50%" +%} + +This is probably what you want anyway, so make sure that you name your Git-clone steps as `main_clone`. If you use any other name, then the working folder will be the parent of the checked-out project which is the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) at `/codefresh/volume`. 
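+
+As a quick illustration (a sketch with placeholder names), a clone step with any other name leaves the working directory at the shared volume, so the steps that follow should point at the checked-out folder themselves:
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  my_checkout:                                 # not named main_clone
+    title: 'Cloning repository...'
+    type: git-clone
+    repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}'
+    revision: '${{CF_REVISION}}'
+    git: github-1
+  PrintFileList:
+    title: 'Listing files'
+    image: alpine:latest
+    working_directory: './${{CF_REPO_NAME}}'   # set explicitly, since main_clone is not used
+    commands:
+      - 'ls -l'
+{% endraw %}
+{% endhighlight %}
+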
+ +If you have more then one clone step in a pipeline, it is recommended to define the working directory explicitly (see next example), instead +of depending on the `main_clone` naming convention, which is best used in pipelines with a single clone step. + +## Cloning multiple repositories + +You can use as many clone steps as you want and at any position in the pipeline. + +Here is an example where two repositories are checked out and two docker images are then built. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + checkoutApp1: + title: 'Cloning first repository...' + type: git-clone + repo: kostis-codefresh/example_nodejs_postgres + revision: experiment1 + git: github + myFirstDockerImage: + title: 'Building First Docker Image' + type: build + dockerfile: Dockerfile + image_name: my-nodejs-image + tag: from-develop-branch + working_directory: './example_nodejs_postgres' + checkoutApp2: + title: 'Cloning second repository...' + type: git-clone + repo: kostis-codefresh/trivial-go-web + revision: master + git: github + mySecondDockerImage: + title: 'Building Second Docker Image' + type: build + dockerfile: Dockerfile + working_directory: './trivial-go-web' + image_name: my-app-image + tag: from-master-branch +{% endraw %} +{% endhighlight %} + +Notice that in this case the git-clone steps are **not** named `main_clone` and therefore we specify exactly what is the working directory for each one. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Git integrations]({{site.baseurl}}/docs/integrations/git-providers/) +[Git triggers in pipelines]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[Clone step in pipelines]({{site.baseurl}}/docs/pipelines/steps/git-clone/) +[Build step in pipelines]({{site.baseurl}}/docs/pipelines/steps/build/) +[Custom git commands]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout-custom/) diff --git a/_docs/example-catalog/ci-examples/gitops-secrets.md b/_docs/example-catalog/ci-examples/gitops-secrets.md new file mode 100644 index 00000000..1db214dc --- /dev/null +++ b/_docs/example-catalog/ci-examples/gitops-secrets.md @@ -0,0 +1,229 @@ +--- +title: "Secrets with GitOps" +description: "Store secrets in Git with Bitnami sealed secrets" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) +- A Kubernetes cluster +- The [Codefresh GitOps agent]({{site.baseurl}}/docs/integrations/argocd/) installed on the cluster + +## Using the Bitnami Sealed secrets controller + +If you follow [GitOps](https://codefresh.io/gitops/){:target="\_blank"}, then you should already know that everything should be placed under source control, and Git is to be used as the single source of truth. + +This presents a challenge with secrets that are needed by the application, as they must never be stored in Git in clear text under any circumstance. + +To solve this issue, we can use the [Bitnami Sealed secrets controller](https://github.com/bitnami-labs/sealed-secrets){:target="\_blank"}. This is a Kubernetes controller that can be used to encrypt/decrypt your application secrets in a secure way. + +The order of events is the following: + +1. You install the Bitnami Sealed secrets controller in the cluster. It generates a public and private key. The private key stays in the cluster and is never revealed. +1. 
You take a raw secret and use the `kubeseal` utility to encrypt it. Encryption happens with the public key of the cluster that you can give to anybody. +1. The encrypted secrets are stored in Git. There are safe to be committed and nobody can decrypt them without direct access to the cluster +1. During runtime you deploy the sealed secret like any other Kubernetes manifest. The controller converts them to [plain Kubernetes secrets](https://kubernetes.io/docs/concepts/configuration/secret/){:target="\_blank"} on the fly using the private key of the cluster +1. Your application reads the secrets like any other Kubernetes secret. Your application doesn't need to know anything about the sealed secrets controller or how the encryption decryption works. + + +To use the controller first install it in your cluster: + +``` +helm repo add sealed-secrets https://bitnami-labs.github.io/sealed-secrets +helm repo update +helm install sealed-secrets-controller sealed-secrets/sealed-secrets +``` + +By default, the controller is installed at the `kube-system` namespace. The namespace +and release names are important, since if you change the defaults, you need to set them up +with `kubeseal` as well, as you work with secrets. + +Download the `kubeseal` CLI: +``` +wget https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.16.0/kubeseal-linux-amd64 -O kubeseal +sudo install -m 755 kubeseal /usr/local/bin/kubeseal +``` + +## Example application + +You can find the example project at [https://github.com/codefresh-contrib/gitops-secrets-sample-app](https://github.com/codefresh-contrib/gitops-secrets-sample-app){:target="\_blank"}. + +It is a web application that prints out several secrets which are [read from the filesystem](https://github.com/codefresh-contrib/gitops-secrets-sample-app/blob/main/settings.ini){:target="\_blank"}: + +`settings.ini` +```ini +[security] +# Path to key pair +private_key = /secrets/sign/key.private +public_key= /secrets/sign/key.pub + +[paypal] +paypal_url = https://development.paypal.example.com +paypal_cert=/secrets/ssl/paypal.crt + +[mysql] +db_con= /secrets/mysql/connection +db_user = /secrets/mysql/username +db_password = /secrets/mysql/password +``` + +The application itself knows nothing about Kubernetes secrets, mounted volumes or any other cluster resource. 
It only reads its own filesystem at `/secrets` + +This folder is populated inside the pod with [secret mounting](https://github.com/codefresh-contrib/gitops-secrets-sample-app/blob/main/manifests/deployment.yml){:target="\_blank"}: + +```yaml +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gitops-secrets-deploy +spec: + replicas: 1 + selector: + matchLabels: + app: gitops-secrets-app + template: + metadata: + labels: + app: gitops-secrets-app + spec: + containers: + - name: gitops-secrets-app + image: docker.io/kostiscodefresh/gitops-secrets-sample-app:latest + imagePullPolicy: Always + ports: + - containerPort: 8080 + volumeMounts: + - name: mysql + mountPath: "/secrets/mysql" + readOnly: true + - name: paypal + mountPath: "/secrets/ssl" + readOnly: true + - name: sign-keys + mountPath: "/secrets/sign/" + readOnly: true + livenessProbe: + httpGet: + path: /health + port: 8080 + readinessProbe: + httpGet: + path: /health + port: 8080 + volumes: + - name: mysql + secret: + secretName: mysql-credentials + - name: paypal + secret: + secretName: paypal-cert + - name: sign-keys + projected: + sources: + - secret: + name: key-private + - secret: + name: key-public + +``` + +This way there is a clear separation of concerns. + + + +You can find the secrets themselves at [https://github.com/codefresh-contrib/gitops-secrets-sample-app/tree/main/never-commit-to-git/unsealed_secrets](https://github.com/codefresh-contrib/gitops-secrets-sample-app/tree/main/never-commit-to-git/unsealed_secrets){:target="\_blank"}. There are encoded with base64 so they are **NOT** safe to commit in Git. + +>Note that for demonstration purposes, the Git repository contains raw secrets so that you can encrypt them yourself. In a production application, the Git repository must only contain sealed/encrypted secrets. + +## Preparing the secrets + +The critical point of this application is to encrypt all the secrets and place them in Git. +By default, the sealed secrets controller encrypts a secret according to a specific namespace (this behavior is configurable), so you need to decide in advance which namespace wil host the application. + +Then encrypt all secrets as below: + +``` +kubectl create ns git-secrets +cd safe-to-commit/sealed_secrets +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/db-creds.yml > db-creds.json +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/key-private.yml > key-private.json +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/key-public.yml > key-public.json +kubeseal -n git-secrets < ../../never-commit-to-git/unsealed_secrets/paypal-cert.yml > paypal-cert.json +kubectl apply -f . -n git-secrets + +``` + +You now have encrypted your plain secrets. These files are safe to commit to Git. +You can see that they have been converted automatically to plain secrets with the command: + +``` +kubectl get secrets -n git-secrets +``` + +## Manually deploying the application + +Note that the application requires all secrets to be present: + +``` +cd safe-to-commit/manifests +kubectl apply -f . -n git-secrets +``` + +You can now visit the application url to see how it has access to all the secrets. + + +## Deploying the application with Codefresh GitOps + +Of course the big advantage of having everything committed into Git, is the ability to adopt GitOps +for the whole application (including secrets). 
+ +This means that you can simply [point Codefresh GitOps to your repository]({{site.baseurl}}/docs/integrations/argocd/#creating-argocd-applications) and have the application +automatically deploy in the cluster. + +{% include image.html +lightbox="true" +file="/images/examples/sealed-secrets/add-app.png" +url="/images/examples/sealed-secrets/add-app.png" +alt="Creating a GitOps application" +caption="Creating a GitOps application" +max-width="50%" +%} + +You can then see the application in the GitOps dashboard: + +{% include image.html +lightbox="true" +file="/images/examples/sealed-secrets/current-state.png" +url="/images/examples/sealed-secrets/current-state.png" +alt="GitOps dashboard" +caption="GitOps dashboard" +max-width="90%" +%} + +If you visit its URL you will see the secrets being loaded: + +{% include image.html +lightbox="true" +file="/images/examples/sealed-secrets/app-secrets.png" +url="/images/examples/sealed-secrets/app-secrets.png" +alt="Application using secrets" +caption="Application using secrets" +max-width="90%" +%} + + +>Note that for simplicity reasons the same Git repository holds both the application source code and its +manifests. In an actual application, you should have two Git repositories (one of the source code only and one of the manifests). + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Codefresh GitOps]({{site.baseurl}}/docs/ci-cd-guides/gitops-deployments/) +[Using secrets]({{site.baseurl}}/docs/pipelines/secrets-store/) +[Secrets with Mozilla Sops]({{site.baseurl}}/docs/example-catalog/ci-examples/decryption-with-mozilla-sops/) +[Vault Secrets in the Pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline/) + diff --git a/_docs/example-catalog/ci-examples/golang-hello-world.md b/_docs/example-catalog/ci-examples/golang-hello-world.md new file mode 100644 index 00000000..8c3e0c3f --- /dev/null +++ b/_docs/example-catalog/ci-examples/golang-hello-world.md @@ -0,0 +1,269 @@ +--- +title: "Create a Docker image for GO" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/go/cf-example-golang-hello-world/ +toc: true +--- + +Codefresh can work with Go projects of any version using built-in modules or any other dependency mechanism. + +## The example golang project + +You can see the example project at [https://github.com/codefresh-contrib/golang-sample-app](https://github.com/codefresh-contrib/golang-sample-app){:target="\_blank"}. The repository contains a simple Golang web application including unit tests. There are 3 Dockerfiles available: + +* [Simple Dockerfile](https://github.com/codefresh-contrib/golang-sample-app/blob/master/Dockerfile){:target="\_blank"} (with old Go version that requires `GOPATH` building) +* [Dockerfile with Go modules](https://github.com/codefresh-contrib/golang-sample-app/blob/master/Dockerfile.mod){:target="\_blank"} (optimized for Docker caching) +* [Multi-stage Dockerfile](https://github.com/codefresh-contrib/golang-sample-app/blob/master/Dockerfile.multistage){:target="\_blank"} (with Go modules and unit tests) + +Let's see these workflows in order. 
+ +## Simple Docker image pipeline + +The most [simple pipeline](https://github.com/codefresh-contrib/golang-sample-app/blob/master/codefresh.yml){:target="\_blank"} that you can create is just two steps: +* A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to fetch the code +* A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create a Docker image + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: full + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Once you run this pipeline Codefresh will create a Docker image for the Golang application: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/golang-simple-pipeline.png" +url="/images/learn-by-example/golang/golang-simple-pipeline.png" +alt="Simple pipeline for Golang" +caption="Simple pipeline for Golang" +max-width="80%" +%} + +The big advantage of this workflow is that the Dockerfile you use can define any Go version and dependency tool. As long as the Dockerfile is self-contained (i.e. it compiles GO on its own), the pipeline will work as expected. + +In the example application, the simple (unoptimized) Dockerfile has an old Go version that still requires `GOPATH` folders. + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.10 + +# Set the Current Working Directory inside the container +WORKDIR $GOPATH/src/github.com/codefresh-contrib/go-sample-app + +# Copy everything from the current directory to the PWD (Present Working Directory) inside the container +COPY . . + +# Download all the dependencies +RUN go get -d -v ./... + +# Install the package +RUN go install -v ./... + +# This container exposes port 8080 to the outside world +EXPOSE 8080 + +# Run the executable +CMD ["go-sample-app"] +{% endraw %} +{% endhighlight %} + + +## Run unit tests as part of the pipeline + +If you want to run Go specific steps in your pipeline, you can use [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) steps with any GO image that you want. If your GO application is using GO modules, this is even easier as you don't need to place the application into a specific GOPATH compliant directory first. + +This [pipeline](https://github.com/codefresh-contrib/golang-sample-app/blob/master/codefresh-gomod.yml){:target="\_blank"} is running unit tests as a separate step and then builds the docker image. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - test + - build +steps: + main_clone: + title: Cloning main repository... + type: git-clone + stage: checkout + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyUnitTests: + title: Unit test + stage: test + image: 'golang:1.12' + commands: + - go test -v + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-golang-image + working_directory: ./ + tag: modules + dockerfile: Dockerfile.mod +{% endraw %} +{% endhighlight %} + +If the unit tests fail, then the docker image will never be created (Codefresh automatically stops a pipeline when there is an error). 
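+
+The same freestyle step can run any additional Go tooling before the image is built. For example (a sketch, these are standard Go commands and are not part of the sample pipeline):
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  MyUnitTests:
+    title: Unit test
+    stage: test
+    image: 'golang:1.12'
+    commands:
+      - go vet ./...               # static analysis
+      - go test -v -cover ./...    # unit tests with coverage output
+{% endraw %}
+{% endhighlight %}
+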
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/golang-ci-pipeline.png" +url="/images/learn-by-example/golang/golang-ci-pipeline.png" +alt="Golang pipeline with unit tests" +caption="Golang pipeline with unit tests" +max-width="80%" +%} + +Notice that in this case we have added module support in the Go application. The new Dockerfile is the following: + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.12-alpine + +RUN apk add --no-cache git + +# Set the Current Working Directory inside the container +WORKDIR /app/go-sample-app + +# We want to populate the module cache based on the go.{mod,sum} files. +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . + +# Build the Go app +RUN go build -o ./out/go-sample-app . + + +# This container exposes port 8080 to the outside world +EXPOSE 8080 + +# Run the binary program produced by `go install` +CMD ["./out/go-sample-app"] +{% endraw %} +{% endhighlight %} + +The Dockerfile will also automatically take advantage of the Codefresh distributed docker cache. + + + +## Create a multi-stage Docker image for GO + +Especially with Go applications, the recommended way to create Docker images is with [multi-stage builds](https://docs.docker.com/develop/develop-images/multistage-build/){:target="\_blank"}. This makes the resulting Docker image as compact as possible. + +You can also embed unit tests in the Docker creation process, which guarantee the correctness of image (integration tests are best kept in the pipeline). + +Here is the new Dockerfile: + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.12-alpine AS build_base + +RUN apk add --no-cache git + +# Set the Current Working Directory inside the container +WORKDIR /tmp/go-sample-app + +# We want to populate the module cache based on the go.{mod,sum} files. +COPY go.mod . +COPY go.sum . + +RUN go mod download + +COPY . . + +# Unit tests +RUN CGO_ENABLED=0 go test -v + +# Build the Go app +RUN go build -o ./out/go-sample-app . + +# Start fresh from a smaller image +FROM alpine:3.9 +RUN apk add ca-certificates + +COPY --from=build_base /tmp/go-sample-app/out/go-sample-app /app/go-sample-app + +# This container exposes port 8080 to the outside world +EXPOSE 8080 + +# Run the binary program produced by `go install` +CMD ["/app/go-sample-app"] +{% endraw %} +{% endhighlight %} + +Codefresh has native support for multi-stage builds. The [pipeline](https://github.com/codefresh-contrib/golang-sample-app/blob/master/codefresh-multi-stage.yml){:target="\_blank"} is the same as the first one with just two steps. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/golang-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Multi-stage Image + type: build + image_name: my-golang-image + working_directory: ./ + tag: multi-stage + dockerfile: Dockerfile.multistage +{% endraw %} +{% endhighlight %} + +You should see a much smaller Docker image at the end. 
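+
+If you also want the image pushed to a registry as part of the same step, the build step accepts a `registry` property that references one of your registry integrations. In the sketch below, `dockerhub` is the integration name used in other examples of this catalog and may differ in your account:
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  MyAppDockerImage:
+    title: Building Docker Multi-stage Image
+    type: build
+    image_name: my-golang-image
+    working_directory: ./
+    tag: multi-stage
+    dockerfile: Dockerfile.multistage
+    registry: dockerhub       # assumed name of your registry integration
+{% endraw %}
+{% endhighlight %}
+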
+ + +## Viewing Docker images + +If you look at your [Docker registry dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images) created the advantages of the multi-stage build are very clear: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/golang-image-size.png" +url="/images/learn-by-example/golang/golang-image-size.png" +alt="Creating different Docker images" +caption="Creating different Docker images" +max-width="80%" +%} + +We recommend using Go modules and multi-stage builds in your Go projects. + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + diff --git a/_docs/example-catalog/ci-examples/golang.md b/_docs/example-catalog/ci-examples/golang.md new file mode 100644 index 00000000..468e4eb8 --- /dev/null +++ b/_docs/example-catalog/ci-examples/golang.md @@ -0,0 +1,14 @@ +--- +title: "Go" +description: "How to build Golang applications with Codefresh CI/CD pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/go/ + - /docs/golang/ +toc: true +--- +This section contains Codefresh examples based on Go. + +- [Golang Docker Example]({{site.baseurl}}/docs/learn-by-example/golang/golang-hello-world/) +- [Golang with goreleaser]({{site.baseurl}}/docs/learn-by-example/golang/goreleaser/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/goreleaser.md b/_docs/example-catalog/ci-examples/goreleaser.md new file mode 100644 index 00000000..23cf3611 --- /dev/null +++ b/_docs/example-catalog/ci-examples/goreleaser.md @@ -0,0 +1,118 @@ +--- +title: "Compile and release a Go application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +[Goreleaser](https://github.com/goreleaser/goreleaser){:target="\_blank"} is a helper utility that allows you to easily create the following for Go applications: + +* Binary packages for each OS/arch +* Archives +* GitHub releases +* Docker images +* Snap/RPM/deb/Homebrew + + +Codefresh can also create Docker images on its own, but Goreleaser is still useful for the binary artifact creation capability. + + +## Run Goreleaser with docker + +You can see the example project at [https://github.com/codefresh-contrib/goreleaser-sample-app](https://github.com/codefresh-contrib/goreleaser-sample-app){:target="\_blank"}. The repository contains a simple Golang web application with a [goreleaser configuration](https://github.com/codefresh-contrib/goreleaser-sample-app/blob/master/.goreleaser.yml){:target="\_blank"}. + + +There is already a [Docker image for Goreleaser](https://hub.docker.com/r/goreleaser/goreleaser/){:target="\_blank"} so it is very easy to use it in Codefresh pipeline. +In the most simple case you case run goreleaser in a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + + `YAML` +{% highlight yaml %} +{% raw %} + ReleaseMyApp: + title: Creating packages + stage: release + image: 'goreleaser/goreleaser' + commands: + - goreleaser --snapshot --skip-publish --rm-dist +{% endraw %} +{% endhighlight %} + +More typically however you also need to provide a GitHub token so that GitHub releases are also available. There are two ways to do that. 
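+
+One of the two, passing a token directly as a pipeline variable (described further in the next section), could look like this sketch, where `GITHUB_TOKEN` is a variable you define yourself holding a token with the `repo` scope:
+
+ `YAML`
+{% highlight yaml %}
+{% raw %}
+  ReleaseMyApp:
+    title: Creating packages
+    stage: release
+    image: 'goreleaser/goreleaser'
+    commands:
+      # GITHUB_TOKEN is an assumed pipeline variable
+      - export GITHUB_TOKEN=${{GITHUB_TOKEN}}
+      - goreleaser --rm-dist
+{% endraw %}
+{% endhighlight %}
+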
+ + +## Create a CI pipeline that compiles/releases Go + +In most cases you want to just reuse the Git integration already defined in Codefresh. +This [pipeline](https://github.com/codefresh-contrib/goreleaser-sample-app/blob/master/codefresh.yml){:target="\_blank"} is using the GitHub token from [Git integration]({{site.baseurl}}/docs/integrations/git-providers/) in order to allow GitHub access. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - release +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + stage: prepare + BuildMyApp: + title: Compiling go code + stage: build + image: 'golang:1.12' + commands: + - go build + GetGitToken: + title: Reading GitHub token + stage: release + image: codefresh/cli + commands: + - cf_export GITHUB_TOKEN=$(codefresh get context github-1 --decrypt -o yaml | yq -y .spec.data.auth.password) + ReleaseMyApp: + title: Creating packages + stage: release + image: 'goreleaser/goreleaser' + commands: + - goreleaser --rm-dist +{% endraw %} +{% endhighlight %} + +Note that GoReleaser [requires a GitHub API token](https://goreleaser.com/environment/){:target="\_blank"} (`GITHUB_TOKEN`) with the `repo` scope to deploy artifacts to GitHub. +Here we use [cf_export]({{site.baseurl}}/docs/pipelines/variables/#exporting-environment-variables-from-a-freestyle-step) and the [codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} in order to ask Codefresh about the existing token (that was used in git integrations). In your case you need to change `github-1` with the name of your [GitHub integration]({{site.baseurl}}/docs/integrations/git-providers/). + +It also possible to pass a GITHUB_TOKEN directly in the pipeline, if you don't want to re-use the existing one. This is an alternative way of allowing Goreleaser to create GitHub releases. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/github-token.png" +url="/images/learn-by-example/golang/github-token.png" +alt="Passing a specific github token in the pipeline" +caption="Passing a specific github token in the pipeline" +max-width="70%" +%} + +You could also store the token in [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). +Regardless of the way you choose to pass the GitHub token, the final step is to make sure that your pipeline is only executed for tag events. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/golang/tags-only-trigger.png" +url="/images/learn-by-example/golang/tags-only-trigger.png" +alt="Run pipeline only on tag creation" +caption="Run pipeline only on tag creation" +max-width="80%" +%} + +This means that this pipeline will not run on normal commits. It is also possible to use [step conditionals]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) for more complex cases. 
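+As an illustration of such a conditional, the release step could guard itself with a `when` block so that it only runs when the build was triggered by a tag. The exact expression depends on your tagging scheme; the sketch below assumes that release tags start with `v` and that the `CF_BRANCH` variable holds the tag name for tag events:
+
+{% highlight yaml %}
+{% raw %}
+  ReleaseMyApp:
+    title: Creating packages
+    stage: release
+    image: 'goreleaser/goreleaser'
+    commands:
+      - goreleaser --rm-dist
+    when:
+      condition:
+        all:
+          isVersionTag: 'match("${{CF_BRANCH}}", "^v[0-9]+", false) == true'
+{% endraw %}
+{% endhighlight %}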
+
+## Related articles
+[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/)
+[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/)
+[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/)
+[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/)
\ No newline at end of file
diff --git a/_docs/example-catalog/ci-examples/gradle.md b/_docs/example-catalog/ci-examples/gradle.md
new file mode 100644
index 00000000..73bc26ee
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/gradle.md
@@ -0,0 +1,207 @@
+---
+title: "Java Example with Gradle and Docker"
+description: "Create Docker images for Spring/Gradle"
+excerpt: ""
+group: example-catalog
+sub_group: ci-examples
+redirect_from:
+  - /docs/java/gradle/
+toc: true
+---
+
+Codefresh can work with Gradle builds in a similar manner to [Maven builds]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/){:target="\_blank"}.
+
+## The example Gradle project
+
+You can see the example project at [https://github.com/codefresh-contrib/gradle-sample-app](https://github.com/codefresh-contrib/gradle-sample-app){:target="\_blank"}. The repository contains a Spring Boot 2 project built with Gradle with the following tasks:
+
+* `gradle test` runs unit tests.
+* `gradle build` creates a self-contained jar file (using Spring Boot).
+
+Once launched, the application presents a simple message at `localhost:8080` and also at the various `/actuator/health` endpoints.
+
+## Gradle and Docker (multi-stage builds)
+
+The easiest way to use Gradle is with [multi-stage builds](https://blog.docker.com/2017/07/multi-stage-builds/){:target="\_blank"}. With multi-stage builds a Docker build can use one base image for compilation/packaging/unit tests and a different one that will hold the runtime of the application. This makes the final image more secure and smaller in size (as it does not contain any development/debugging tools).
+
+In the case of Gradle, you can use a base image that has the full JDK and Gradle itself, while the final image has the JRE and nothing else.
+
+The example project is actually using multi-stage builds by default.
+
+Here is the multi-stage Dockerfile:
+
+ `Dockerfile`
+{% highlight docker %}
+{% raw %}
+FROM gradle:4.7.0-jdk8-alpine AS build
+COPY --chown=gradle:gradle . /home/gradle/src
+WORKDIR /home/gradle/src
+RUN gradle build --no-daemon
+
+FROM openjdk:8-jre-slim
+
+EXPOSE 8080
+
+RUN mkdir /app
+
+COPY --from=build /home/gradle/src/build/libs/*.jar /app/spring-boot-application.jar
+
+ENTRYPOINT ["java", "-XX:+UnlockExperimentalVMOptions", "-XX:+UseCGroupMemoryLimitForHeap", "-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"]
+{% endraw %}
+{% endhighlight %}
+
+This Docker build does the following:
+
+1. Starts from the Gradle image
+1. Copies the Java source code inside the container
+1. Compiles the code and runs unit tests (with `gradle build`)
+1. Discards the Gradle image with all the compiled classes/unit test results etc.
+1. Starts again from the JRE image and copies **only** the JAR file created before
+
+We start Gradle without the long-running daemon, as the daemon is best used during local development only and not in CI/CD pipelines.
+
+### Create a CI pipeline for Gradle (multi-stage Docker builds)
+
+Because in multi-stage builds Docker itself handles most of the build process, moving the project to Codefresh is straightforward.
We just need [a single step](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/gradle-sample-app' + revision: master + git: github + BuildingDockerImage: + title: Building Docker Image + stage: build + type: build + image_name: gradle-sample-app + working_directory: ./ + tag: 'multi-stage' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This will compile/test/package the Gradle application and create a Docker image. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/gradle-multistage.png" +url="/images/learn-by-example/java/gradle-multistage.png" +alt="Gradle Multi-stage Docker build" +caption="Gradle Multi-stage Docker build" +max-width="80%" +%} + +Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. + + +## Packaging an existing Jar in a Docker image + +It also possible to have a simpler Dockerfile that only packages the final jar which was already created in the CI/CD pipeline (i.e. outside of Docker). + +A [simpler Dockerfile](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/Dockerfile.only-package){:target="\_blank"} is also provided at the same repository. It uses the base JRE image and just copies the JAR file inside the container. + + `Dockerfile.only-package` +{% highlight docker %} +{% raw %} +FROM openjdk:8-jre-slim + +EXPOSE 8080 + +RUN mkdir /app + +COPY build/libs/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java", "-XX:+UnlockExperimentalVMOptions", "-XX:+UseCGroupMemoryLimitForHeap", "-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] +{% endraw %} +{% endhighlight %} + +This means that _before_ building the Docker image, the compilation step (`gradle build`) is expected to be finished already. Therefore, in the `codefresh.yml` file we need at least two steps. The first one should prepare the JAR file and the second +one should create the Docker image. + +### Create a CI pipeline for a Gradle JAR + +The repository also contains a premade [Codefresh YAML file](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh-package-only.yml){:target="\_blank"} that creates a JAR file first and then packages it in a Docker image. + +Here are the full contents of the file. + + `codefresh-package-only.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - package + - build +steps: + main_clone: + title: Cloning main repository... 
+    stage: prepare
+    type: git-clone
+    repo: 'codefresh-contrib/gradle-sample-app'
+    revision: master
+    git: github
+  MyUnitTests:
+    title: Compile/Unit test
+    stage: test
+    image: gradle:4.7.0-jdk8-alpine
+    commands:
+      - gradle test --no-daemon --build-cache --gradle-user-home=/codefresh/volume/.gradle -Dmaven.repo.local=/codefresh/volume/m2
+  BuildMyJar:
+    title: Packaging Jar file
+    stage: package
+    image: gradle:4.7.0-jdk8-alpine
+    commands:
+      - gradle build --no-daemon --build-cache --gradle-user-home=/codefresh/volume/.gradle -Dmaven.repo.local=/codefresh/volume/m2
+  MyAppDockerImage:
+    title: Building Docker Image
+    type: build
+    stage: build
+    image_name: gradle-sample-app
+    working_directory: ./
+    tag: 'non-multi-stage'
+    dockerfile: Dockerfile.only-package
+{% endraw %}
+{% endhighlight %}
+
+The pipeline starts by checking out the code using a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). The next two steps are [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/), while the last one is a [build step]({{site.baseurl}}/docs/pipelines/steps/build/).
+
+{% include image.html
+lightbox="true"
+file="/images/learn-by-example/java/gradle-ci-pipeline.png"
+url="/images/learn-by-example/java/gradle-ci-pipeline.png"
+alt="Gradle pipeline"
+caption="Gradle pipeline"
+max-width="80%"
+%}
+
+After checking out the code, we use the standard [Gradle Docker image](https://hub.docker.com/_/gradle/){:target="\_blank"} to run unit tests. We also pass parameters that disable the Gradle daemon, enable the build cache, and change the cache folder to reside in the Codefresh volume.
+
+### Using the Gradle cache in Codefresh
+
+Codefresh [caches automatically]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#how-caching-works-in-codefresh) the workspace of a build (`/codefresh/volume`) for us. This works great for build tools that keep their cache in the project folder, but not for Maven/Gradle, which keep their cache externally. By changing the location of the Gradle cache we make sure that Codefresh also caches the Gradle libraries automatically, resulting in much faster builds. We also place the local Maven repository in the shared volume, so that all JARs created by Gradle (i.e. with an `install` task) are also available to the next pipeline stage.
+
+The next step is similar to the previous one, but this time we actually build the JAR file. We again define a custom cache folder, so when you run the build you will see that Gradle automatically picks up the cache from the previous step. All Codefresh steps in a pipeline [run on the same workspace]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps), so the build results from one step are visible to the next.
+
+The last step is a Docker build. We name our image **gradle-sample-app** and tag it with the string `non-multi-stage`, but of course you can use any other tag name you wish.
+Once the pipeline is finished, you will see the Spring Boot 2 Docker image in your [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images).
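+If you also want the image in an external registry (and not only in the Codefresh image dashboard), one option is to append a push step such as the sketch below, where `dockerhub` stands for whatever name you gave to your own registry integration:
+
+{% highlight yaml %}
+{% raw %}
+  PushToRegistry:
+    title: Pushing image to registry
+    type: push
+    stage: build
+    candidate: ${{MyAppDockerImage}}
+    tag: 'non-multi-stage'
+    # "dockerhub" is an assumed integration name; replace it with your own
+    registry: dockerhub
+{% endraw %}
+{% endhighlight %}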
+ +## Related articles +[Spring Maven example]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/ci-examples/import-data-to-mongodb.md b/_docs/example-catalog/ci-examples/import-data-to-mongodb.md new file mode 100644 index 00000000..223ecc6c --- /dev/null +++ b/_docs/example-catalog/ci-examples/import-data-to-mongodb.md @@ -0,0 +1,60 @@ +--- + +title: "Import data to MongoDB" +description: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/import-data-to-mongodb-in-composition/ + - /docs/on-demand-test-environment/example-compositions/import-data-to-mongodb/ +toc: true +--- + +To import, restore, or for any operation before using MongoDB in your application, look at the following example. + +You just need to create Dockerfile for Mongo seed service and provide the command to prepare MongoDB. In this case, the command is `mongoimport`. + + `Dockerfile mongo_seed` +{% highlight docker %} +FROM mongo +COPY init.json /init.json +CMD mongoimport --host mongodb --db exampleDb --collection contacts --type json --file /init.json --jsonArray +{% endhighlight %} + +## Looking around +In the root of this repository you'll find a file named `docker-compose.yml`. +Let's quickly review the contents of this file: + + `docker-compose.yml` +{% highlight yaml %} +{% raw %} +version: '3' +services: + mongodb: + image: mongo + command: mongod --smallfiles + ports: + - 27017 + + mongo_seed: + image: ${{mongo_seed}} + links: + - mongodb + + client: + image: ${{build_prj}} + links: + - mongodb + ports: + - 9000 + environment: + - MONGO_URI=mongodb:27017/exampleDb +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +You can add the following example to your GitHub or Bitbucket account, and build the [example](https://github.com/codefreshdemo/cf-example-manage-mongodb){:target="_blank"}. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-mongo.md b/_docs/example-catalog/ci-examples/integration-tests-with-mongo.md new file mode 100644 index 00000000..1947496a --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-mongo.md @@ -0,0 +1,101 @@ +--- +title: "Integration Tests with Mongo" +description: "Launching a MongoDB service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/nodejsmongo/ + - /docs/testing/unit-tests/unit-tests-with-mongo/ +toc: true +--- + +In this example, we will see a NodeJS project that uses MongoDB for data storage. For the integration test phase we will launch an instance of MongoDB in order to run a set of [Mocha tests](https://mochajs.org/){:target="\_blank"}. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/mongodb-integration-tests.png" +url="/images/examples/integration-tests/mongodb-integration-tests.png" +alt="MongoDB integration tests with Codefresh" +caption="MongoDB integration tests with Codefresh" +max-width="90%" +%} + +The Mocha tests are looking for a MongoDB connection at `mongo:27017`. 
+ +## The example NodeJS project + +You can see the example project at [https://github.com/codefreshdemo/example_nodejs_mongo](https://github.com/codefreshdemo/example_nodejs_mongo){:target="\_blank"}. The repository contains the NodeJS source code and the Mocha tests. + +You can play with it locally by using Docker compose to launch both the application and the MongoDB datastore. + +## Create a pipeline with MongoDB integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/example_nodejs_mongo" + revision: "master" + git: github + stage: prepare + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "node-mongo-app" + tag: "master" + dockerfile: "Dockerfile" + stage: build + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_app_image}}' + environment: + - MONGO_PORT=27017 + commands: + # MongoDB is certainly up at this point + - cd /src + - npm test + services: + composition: + mongo: + image: mongo:latest + ports: + - 27017 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: '${{build_app_image}}' + commands: + - "nslookup mongo" + - "nc -z mongo 27017" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with the application source code as well as the Mocha tests through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Runs Mocha tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active MongoDB instance + +Notice that we also use the `readiness` property in the testing phase so that we can verify MongoDB is ready and listening, before running the tests. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) + + + + diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-mysql.md b/_docs/example-catalog/ci-examples/integration-tests-with-mysql.md new file mode 100644 index 00000000..cccb9a43 --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-mysql.md @@ -0,0 +1,110 @@ +--- +title: "Integration Tests with MySQL" +description: "Launching a MySQL service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/nodejsmysql/ + - /docs/testing/unit-tests/unit-tests-with-mysql/ + - /docs/setup-unit-tests/ + - /docs/testing/unit-tests/unit-tests-with-composition/ + - /docs/run-unit-tests-with-composition/ + - /docs/unit-tests-with-database/ + - /docs/testing/unit-tests/unit-tests-with-database/ + - /docs/example-catalog/ci-examples/integration-tests-with-database/ +toc: true +--- + +In this example, we will see a NodeJS project that is using MySQL for data storage. 
For the integration test phase we will launch an instance of MySQL in order to run a simple integration test. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/mysql-integration-tests.png" +url="/images/examples/integration-tests/mysql-integration-tests.png" +alt="MySQL integration tests with Codefresh" +caption="MySQL integration tests with Codefresh" +max-width="90%" +%} + +The integration tests look for a MySQL connection at `test_mysql_db:3306`. + +## Example NodeJS project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-unit-tests-with-composition](https://github.com/codefreshdemo/cf-example-unit-tests-with-composition){:target=\_blank"}. The repository contains the NodeJS source code and the simple integration test. + +You can play with it locally by using Docker compose to launch both the application and the MySQL Database. + +## Create a pipeline with MySQL integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/cf-example-unit-tests-with-composition" + revision: "master" + git: github + stage: prepare + build_test_image: + title: "Building Test Docker Image" + type: "build" + image_name: "mysql-tests" + tag: "master" + dockerfile: "Dockerfile" + stage: build + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_test_image}}' + environment: &test_mysql_vars + - MYSQL_ROOT_PASSWORD=admin + - MYSQL_USER=my_user + - MYSQL_PASSWORD=admin + - MYSQL_DATABASE=nodejs + - MYSQL_HOST=test_mysql_db + commands: + # MySQL is certainly up at this point + - cd /usr/src/app + - npm test + services: + composition: + test_mysql_db: + image: mysql:5.7 + ports: + - 3306 + environment: *test_mysql_vars # Same MYSQL_HOST, MYSQL_USER etc. + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: '${{build_test_image}}' + commands: + - "nslookup test_mysql_db" + - "nc -z test_mysql_db 3306" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with the integration test through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Runs the tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active MySQL instance passing the required environment variables (that match what the test is expecting). + +Notice that both the DB as well as the tests share a set of variables (`MYSQL_PASSWORD`, `MYSQL_USER` etc.) and thus we use [YAML anchors]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#using-yaml-anchors-to-avoid-repetition) to avoid duplication. + +Notice that we also use the `readiness` property in the testing phase so that we can verify MySQL is ready and listening, before running the tests. 
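+For local experiments, the same setup can be approximated with a Docker Compose file. The sketch below mirrors the pipeline (same image, variables, and YAML anchor trick), but the build context and test command are assumptions and may differ from what the example repository actually ships:
+
+ `docker-compose.yml`
+{% highlight yaml %}
+{% raw %}
+version: '3'
+services:
+  test_mysql_db:
+    image: mysql:5.7
+    environment: &test_mysql_vars
+      - MYSQL_ROOT_PASSWORD=admin
+      - MYSQL_USER=my_user
+      - MYSQL_PASSWORD=admin
+      - MYSQL_DATABASE=nodejs
+      - MYSQL_HOST=test_mysql_db
+  tests:
+    build: .                       # assumes the test Dockerfile is at the repository root
+    environment: *test_mysql_vars  # same variables as the database service
+    depends_on:
+      - test_mysql_db
+    command: sh -c "sleep 15 && npm test"
+{% endraw %}
+{% endhighlight %}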
+ +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-postgres.md b/_docs/example-catalog/ci-examples/integration-tests-with-postgres.md new file mode 100644 index 00000000..ee2a4110 --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-postgres.md @@ -0,0 +1,99 @@ +--- +title: "Integration Tests with Postgres" +description: "Launching a PostgreSQL service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/unit-tests-with-postgres/ + - /docs/testing/unit-tests/unit-tests-with-postgres/ +toc: true +--- + +In this example, we will see a NodeJS project that is using PostgreSQL for data storage. For the integration test phase we will launch an instance of PostgreSQL in order to run a simple integration test. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/postgresql-integration-tests.png" +url="/images/examples/integration-tests/postgresql-integration-tests.png" +alt="PostgreSQL integration tests with Codefresh" +caption="PostgreSQL integration tests with Codefresh" +max-width="90%" +%} + +The integration tests look for a PostgreSQL connection at `postgres:5432`. + +## Example NodeJS project + +You can see the example project at [https://github.com/codefreshdemo/example_nodejs_postgres](https://github.com/codefreshdemo/example_nodejs_postgres){:target="\_blank"}. The repository contains the NodeJS source code and the simple integration test. + +You can play with it locally by using Docker compose to launch both the application and the PostgreSQL Database. + +## Create a pipeline with PostgreSQL integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/example_nodejs_postgres" + revision: "master" + git: github + stage: prepare + run_integration_tests: + title: "Running integration tests" + stage: test + image: node:6.9.1 + environment: &test_postgresql_vars + - POSTGRES_USER=user + - POSTGRES_PASSWORD=admin + - POSTGRES_DB=todo + commands: + # PostgreSQL is certainly up at this point + - npm install -g gulp + - npm install + - npm test + services: + composition: + postgres: + image: postgres:11.5 + ports: + - 5432 + environment: *test_postgresql_vars # Same POSTGRES_USER, POSTGRES_PASSWORD etc. + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: postgres:11.5 + commands: + - "pg_isready -h postgres" + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Runs the tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active PostgreSQL instance passing the required environment variables (that match what the test is expecting). 
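+The `&test_postgresql_vars`/`*test_postgresql_vars` notation used above is plain YAML anchors and aliases, nothing Codefresh specific. Stripped down to a minimal sketch, the mechanism is simply:
+
+{% highlight yaml %}
+{% raw %}
+# define the list once and give it an anchor
+common_variables: &common_variables
+  - POSTGRES_USER=user
+  - POSTGRES_PASSWORD=admin
+  - POSTGRES_DB=todo
+
+# reuse it anywhere else in the same YAML document with an alias
+first_step:
+  environment: *common_variables
+second_step:
+  environment: *common_variables
+{% endraw %}
+{% endhighlight %}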
+ +Notice that both the DB as well as the tests share a set of variables (`POSTGRES_USER`, `POSTGRES_PASSWORD` etc.) and thus we use [YAML anchors]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#using-yaml-anchors-to-avoid-repetition) to avoid duplication. + +Notice that we also use the `readiness` property in the testing phase so that we can verify PostgreSQL is ready and listening, before running the tests. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +[Preload a DB with tests data]({{site.baseurl}}/docs/example-catalog/ci-examples/populate-a-database-with-existing-data/) + + diff --git a/_docs/example-catalog/ci-examples/integration-tests-with-redis.md b/_docs/example-catalog/ci-examples/integration-tests-with-redis.md new file mode 100644 index 00000000..027a5710 --- /dev/null +++ b/_docs/example-catalog/ci-examples/integration-tests-with-redis.md @@ -0,0 +1,129 @@ +--- +title: "Integration Tests with Redis" +description: "Launching a Redis service container" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/python-redis/ + - /docs/testing/unit-tests/unit-tests-with-redis/ +toc: true +--- + +In this example, we will see a Python project that is using Redis for storing a web counter. For the integration test phase we will launch both the application and an instance of Redis in order to run a simple integration test. + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/redis-integration-tests.png" +url="/images/examples/integration-tests/redis-integration-tests.png" +alt="Redis integration tests with Codefresh" +caption="Redis integration tests with Codefresh" +max-width="90%" +%} + +The application will be launched with a hostname `web` while Redis will be at `redis:6379`. + +## Example Python project + +You can see the example project at [https://github.com/codefreshdemo/example_python_redis](https://github.com/codefreshdemo/example_python_redis){:target="\_blank"}. The repository contains the Python source code and a test script. + +You can play with it locally by using Docker compose to launch both the application and the Redis datastore. + +## Create a pipeline with Redis integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." 
+ repo: "codefreshdemo/example_python_redis" + revision: "master" + git: github + stage: prepare + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "python-redis-app" + tag: "latest" + dockerfile: "Dockerfile" + stage: build + build_test_image: + title: "Building Docker Test Image" + type: "build" + image_name: "python-redis-app-tests" + tag: "latest" + dockerfile: "Dockerfile.test" + stage: test + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_test_image}}' + commands: + # Redis and app are certainly up at this point + - sh ./test.sh + services: + composition: + redis: + image: redis:latest + ports: + - 6379 + web: + image: '${{build_app_image}}' + ports: + - 80 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: '${{build_test_image}}' + commands: + - "nslookup redis" + - "nslookup web" + - "nc -z redis 6379" + - "nc -z web 80" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with the application itself through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Builds a helper image that contains `nc` and `curl` that will be used for the integration tests. +1. Runs the test script while launching two [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) (one for the app and one for Redis). + +Notice that we also use the `readiness` property in the testing phase so that we can verify that both the application +as well as Redis are up, before running the tests. + +## Integration test script + +The integration test is very simple. It just uses `curl` to hit the Python endpoint and `grep` to check for a well known string. + + `test.sh` +{% highlight sh %} +#!bin/bash + +if curl web | grep -q 'Visits: '; then + echo "Tests passed!" + exit 0 +else + echo "Tests failed!" + exit 1 +fi +{% endhighlight %} + +Notice that we use the helper image both for running the test (because of `curl`) and for testing the readiness (because of `nc`). In a more complex application these could be two completely different images. 
+ + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) diff --git a/_docs/example-catalog/ci-examples/java.md b/_docs/example-catalog/ci-examples/java.md new file mode 100644 index 00000000..c28cd55c --- /dev/null +++ b/_docs/example-catalog/ci-examples/java.md @@ -0,0 +1,15 @@ +--- +title: "Java" +description: "" +group: example-catalog +redirect_from: + - /docs/java/ +toc: true +--- +This section contains Codefresh examples based on Java: + +- [Spring Boot 2 with Maven]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/) +- [Gradle]({{site.baseurl}}/docs/learn-by-example/java/gradle/) +- [Publish a JAR]({{site.baseurl}}/docs/learn-by-example/java/publish-jar/) +- [Spring MVC JDBC Template]({{site.baseurl}}/docs/learn-by-example/java/spring-mvc-jdbc-template/) + diff --git a/_docs/example-catalog/ci-examples/launch-composition.md b/_docs/example-catalog/ci-examples/launch-composition.md new file mode 100644 index 00000000..4b010f39 --- /dev/null +++ b/_docs/example-catalog/ci-examples/launch-composition.md @@ -0,0 +1,85 @@ +--- +title: "Launch Compositions" +description: "Create a dynamic environment to preview your feature" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/launch-composition-1/ +toc: true +--- +Using this repository, we will help you get up to speed with basic functionality such as: building Docker images and launching compositions. +This project uses `Node JS` to build an application which will eventually become a distributable Docker image. + +## Looking around + +In the root of this repository you'll find a file named `codefresh.yml`. This is our [pipeline definition]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) and it describes the different steps that comprise our process. Let's quickly review the contents of this file: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +stages: + - prepare + - package + - launch +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: codefreshdemo/cf-example-launch-composition + revision: 'master' + git: github + stage: prepare + build_image: + title: Building Image + type: build + #Important: rename this image to to a valid repository in your registry. For example: myUserName/vote + image_name: example-launch-compose + #Dockerfile location should be relative to the working directory + dockerfile: Dockerfile + tag: master + stage: package + launch_composition: + title: Launch Composition + type: launch-composition + composition: + version: '2' + services: + app: + image: example-launch-compose:master + ports: + - 3000 + environment_name: 'cf-example-launch-composition' + entry_point: app + fail_fast: false + stage: launch +{% endhighlight %} + +The pipeline clones the source code, builds a docker image and then + [creates a preview environment]({{site.baseurl}}/docs/pipelines/steps/launch-composition/) with that image. + + +>**Your environments are limited** + Be aware that the number of environments you can run is limited. 
When using the same environment, define that the old one would terminate before launching the new environment. That way you can control the number of environments running in your account. + + +### Example + +Just head over to the example [**repository**](https://github.com/codefreshdemo/cf-example-launch-composition){:target=\_blank"} in GitHub and follow the instructions there. + + +Here is the end result: + +{% include image.html +lightbox="true" +file="/images/examples/composition/launch-composition-example.png" +url="/images/examples/composition/launch-composition-example.png" +alt="Launch composition example" +caption="Launch composition example" +max-width="90%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Unit tests]({{site.baseurl}}/docs/examples/example-catalog/ci-examples/run-integration-tests/) +[Integration tests]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-database/) +[Preview environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file.md b/_docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file.md new file mode 100644 index 00000000..47996ba4 --- /dev/null +++ b/_docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file.md @@ -0,0 +1,59 @@ +--- +title: "Use Docker compose" +description: "Launch a composition and define a service environment variable using a file" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/launching-a-composition-and-passing-a-service-environment-variable-using-a-file/ +toc: true +old_url: /docs/launching-a-composition-and-passing-a-service-environment-variable-using-a-file +--- +At times when launching a composition, you need to pass many environment variables to a specific service. +To do so, you can use `docker-compose 'env_file'` field on any service, and use files from the current working directory from which the composition is being launched. +This works for both `composition` and `launch-composition` step types. + +>**Note**: + When launching a composition directly from the Compositions view, using `env_file` does not work as it is being launched in an empty working directory. + Consider moving the composition launch as part of a usual pipeline which will give you ability to use files from your cloned repository. + + +## Examples +Compositions are launched within a working directory, which is the cloned repository by default. +This means that you can always reference an `env_file` just as would reference a `docker-compose` file. 
+ + `Inline Composition` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + + inline_composition: + title: Launch inline composition + type: launch-composition + environment_name: 'environment name' + composition: + version: '3' + services: + service: + image: alpine + env_file: ./env-file +{% endraw %} +{% endhighlight %} + + + `Composition from file` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + + composition_from_file: + title: Launch composition from file + type: launch-composition + composition: './docker-compose.yml' + environment_name: 'environment name' +{% endraw %} +{% endhighlight %} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/lets-chat.md b/_docs/example-catalog/ci-examples/lets-chat.md new file mode 100644 index 00000000..b14e965b --- /dev/null +++ b/_docs/example-catalog/ci-examples/lets-chat.md @@ -0,0 +1,121 @@ +--- +title: "Let's Chat example" +description: "Create Docker images for Node/Express.js applications" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/lets-chat/ +toc: true +--- + +Let’s Chat is self-hosted chat app for small to big teams. + +## The example Node.JS project + +You can see the example project at [https://github.com/codefreshdemo/demochat](https://github.com/codefreshdemo/demochat){:target="\_blank"}. The repository contains the source code of the project along with two Dockerfiles (one for unit tests) and various docker-compose configurations + +The project requires a Mongo Database to work and by default it uses port 5000 for its web interface. + +## Create a CI pipeline for Node.js + +Creating a CI/CD pipeline for NodeJS is very easy, because Codefresh has built-in steps for creating Docker images and running commands with containers. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/nodejs/nodejs-pipeline.png" +url="/images/learn-by-example/nodejs/nodejs-pipeline.png" +alt="Building and testing a Node.js application" +caption="Building and testing a Node.js application" +max-width="100%" +%} + +Here is the [full pipeline](https://github.com/codefreshdemo/demochat/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. 
+ + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "unit" + - "build" + - "integration" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefreshdemo/demochat" + revision: "master" + stage: "clone" + + build_dev_image: + title: "Building Dev image" + type: "build" + image_name: "codefreshdemo/demochat" + working_directory: "${{clone}}" + tag: "dev" + dockerfile: "Dockerfile.dev" + stage: "unit" + + test: + title: "Running test" + type: "freestyle" + image: ${{build_dev_image}} + working_directory: /root/demochat + commands: + - 'npm run test' + stage: "unit" + + build_image: + title: "Building App image" + type: "build" + image_name: "codefreshdemo/demochat" + working_directory: "${{clone}}" + tag: "dev" + dockerfile: "Dockerfile" + stage: "build" + + integration_step: + type: composition + stage: 'integration' + composition: + version: '2' + services: + app: + image: ${{build_image}} + links: + - mongo + ports: + - 5000 + mongo: + image: mongo + composition-candidates: + main: + image: nhoag/curl + command: bash -c "sleep 30 && curl http://app:5000/" | echo 'works' + +{% endraw %} +{% endhighlight %} + +> Note that you should change `codefreshdemo` in the clone step with your own Github account if you fork the repository. Also in both build steps you should change `codefreshdemo/demochat` with your own image name that is compliant to your Dockerhub account or other connected registry. + +This pipeline has 4 [stages]({{site.baseurl}}/docs/pipelines/stages/) and performs the following: + + 1. Clones the source code using the [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step + 1. Builds a Docker image for unit tests with the [build step]({{site.baseurl}}/docs/pipelines/steps/build/) + 1. Runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) in the Docker image that was just created with a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + 1. Building a Docker image for the final application + 1. Runs [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) using a [composition step]({{site.baseurl}}/docs/pipelines/steps/composition/) + +If you run the pipeline multiple times, you will also see the [Codefresh caching mechanisms]({{site.baseurl}}/docs/pipelines/pipeline-caching/) in action for faster build times. + +## Related articles +[Voting app example]({{site.baseurl}}/docs/example-catalog/ci-examples/voting-app/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + diff --git a/_docs/example-catalog/ci-examples/mobile.md b/_docs/example-catalog/ci-examples/mobile.md new file mode 100644 index 00000000..e0c6f991 --- /dev/null +++ b/_docs/example-catalog/ci-examples/mobile.md @@ -0,0 +1,10 @@ +--- +title: "Mobile Apps" +description: "How to build Mobile applications with Codefresh CI/CD pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- +This section contains Codefresh examples for Mobile application. 
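+The `sleep 30` in the composition candidate is a simple way of waiting for the application to boot. If you prefer an explicit readiness check, the same integration test could also be written with [service containers]({{site.baseurl}}/docs/pipelines/service-containers/). The following is only a sketch of that variation and is not part of the example repository:
+
+{% highlight yaml %}
+{% raw %}
+  integration_step:
+    title: "Running integration test"
+    stage: "integration"
+    image: nhoag/curl
+    commands:
+      - curl --fail http://app:5000/
+    services:
+      composition:
+        mongo:
+          image: mongo
+        app:
+          image: '${{build_image}}'
+          ports:
+            - 5000
+      readiness:
+        timeoutSeconds: 30
+        periodSeconds: 15
+        image: nhoag/curl
+        commands:
+          - curl --fail http://app:5000/
+{% endraw %}
+{% endhighlight %}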
+ +- [Android]({{site.baseurl}}/docs/learn-by-example/mobile/android/) diff --git a/_docs/example-catalog/ci-examples/nodejs.md b/_docs/example-catalog/ci-examples/nodejs.md new file mode 100644 index 00000000..4ed04ccd --- /dev/null +++ b/_docs/example-catalog/ci-examples/nodejs.md @@ -0,0 +1,15 @@ +--- +title: "Node.js" +description: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/nodejs/ +toc: true +--- + +This section contains Codefresh examples based on Node.js: + +- [Let's Chat]({{site.baseurl}}/docs/learn-by-example/nodejs/lets-chat/) - Express.js + Mongo Example +- [Voting app]({{site.baseurl}}/docs/learn-by-example/nodejs/voting-app/) - Microservices app with multiple programming languages +- [React JS app]({{site.baseurl}}/docs/learn-by-example/nodejs/react/) - React.JS + multi stage Docker build example \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/non-git-checkout.md b/_docs/example-catalog/ci-examples/non-git-checkout.md new file mode 100644 index 00000000..5f32d93b --- /dev/null +++ b/_docs/example-catalog/ci-examples/non-git-checkout.md @@ -0,0 +1,100 @@ +--- +title: "Checking out from other source control systems" +description: "Work with non-git repositories" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh has [native Git support]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/), but you can still use any other version control system such as SVN, CVS, hg, etc. + +The only requirement is that you find or create a Docker image that contains the client for that source control system and then use a +[freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) to run it. + +## Checking out Subversion code + +There is already a public [Docker image with the svn client](https://hub.docker.com/r/jgsqware/svn-client/){:target="\_blank"}, so it is very easy to run it in a Codefresh pipeline. + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomCheckout: + title: Performing SVN checkout + image: jgsqware/svn-client + commands: + - pwd + - rm -rf audacity-svn + - svn checkout https://svn.code.sf.net/p/audacity/svn/ audacity-svn + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l /codefresh/volume/' +{% endraw %} +{% endhighlight %} + +Notice the `rm` command before the clone step. This makes sure that every time the pipeline runs, the `svn checkout` step is implemented in an empty directory. + + + +## Checking out Mercurial or CVS Code + +It is very simple to use any other source control system in a Codefresh pipeline. The easiest way is to just call the respective executable. Here are two examples: + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myHgStep: + title: Using HG + image: alpine:latest + commands: + - apk add --no-cache mercurial + - hg --version + - hg clone https://www.mercurial-scm.org/repo/hg mercurial-repo + myCvsStep: + title: Using CVS + image: alpine:latest + commands: + - apk add --no-cache cvs + - cvs --version + - cvs -d :pserver:anonymous@cvs.project-open.net:/home/cvsroot checkout -c +{% endraw %} +{% endhighlight %} + +A much faster way is to create your own Dockerfile that includes the client you need and then define that image directly in the freestyle step. 
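+For example, if you had published such an image to a registry (the image name `my-account/hg-client` below is purely hypothetical), the Mercurial step would shrink to just the clone command:
+
+{% highlight yaml %}
+{% raw %}
+  myHgStep:
+    title: Using HG from a prebuilt image
+    # hypothetical image that already contains the Mercurial client
+    image: my-account/hg-client:latest
+    commands:
+      - hg clone https://www.mercurial-scm.org/repo/hg mercurial-repo
+{% endraw %}
+{% endhighlight %}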
+ + +## Checking out Perforce code + +Codefresh has created a [Perforce plugin](https://hub.docker.com/r/codefresh/cf-p4-plugin/tags){:target="\_blank"} which packs the p4 client into a Docker image to be used from Codefresh pipelines: + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + myCustomCheckout: + title: Performing Perforce checkout + image: codefresh/cf-p4-plugin:latest + commands: + - mkdir -p /codefresh/volume/p4repo/ + - p4 client -o | grep -v '#' | sed '/Root:/c\Root:/codefresh/volume/p4repo/' | p4 client -i + - cd /codefresh/volume/p4repo/ && p4 rec + - 'ls -la' + environment: + - P4PORT=serveradress:serverport + - P4CLIENT=clientname + - P4USER=username + - P4PASSWD=password +{% endraw %} +{% endhighlight %} + +Define the environment variables in [Codefresh shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Native Git checkout]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) +[Running custom git commands]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout-custom/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) diff --git a/_docs/example-catalog/ci-examples/php.md b/_docs/example-catalog/ci-examples/php.md new file mode 100644 index 00000000..b447f0d5 --- /dev/null +++ b/_docs/example-catalog/ci-examples/php.md @@ -0,0 +1,135 @@ +--- +title: "Create a Docker image for Php" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with Php projects using any of the popular frameworks (Laravel, Symphony, CakePHp etc.) + +## The example php project + +You can see the example project at [https://github.com/codefresh-contrib/php-composer-sample-app](https://github.com/codefresh-contrib/php-composer-sample-app){:target="\_blank"}. The repository contains a simple Php project that uses [composer](https://getcomposer.org/) as a package manager. + +The dockerfile uses [multi-stage builds](https://docs.docker.com/develop/develop-images/multistage-build/){:target="\_blank"} to minimize the size of the docker image. + +`Dockerfile` +{% highlight docker %} +{% raw %} +FROM composer:1.9.3 as vendor + +WORKDIR /tmp/ + +COPY composer.json composer.json +COPY composer.lock composer.lock + +RUN composer install \ + --ignore-platform-reqs \ + --no-interaction \ + --no-plugins \ + --no-scripts \ + --prefer-dist + + +FROM php:7.2-apache-stretch + +COPY . /var/www/html +COPY --from=vendor /tmp/vendor/ /var/www/html/vendor/ +{% endraw %} +{% endhighlight %} + + +## Create a Docker image for Php project + +An [example pipeline](https://github.com/codefresh-contrib/php-composer-sample-app/blob/master/codefresh.yml){:target="\_blank"} is also offered in the git repository. +It contains just two [steps]({{site.baseurl}}/docs/pipelines/steps/): + +* A [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) to fetch the code +* A [build step]({{site.baseurl}}/docs/pipelines/steps/build/) to create a Docker image + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'codefresh-contrib/php-composer-sample-app' + revision: master + git: github + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-php-image + working_directory: ./ + tag: master + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Once you run this pipeline Codefresh will create a Docker image for the Php application: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/php-cicd-pipeline.png" +url="/images/learn-by-example/php/php-cicd-pipeline.png" +alt="Creating a docker image for php" +caption="Creating a docker image for php" +max-width="80%" +%} + +Notice that all dependencies are downloaded when the dockerfile is created. + + + + +## Launch Docker images + +Codefresh can also launch Docker images (using Docker swarm behind the scenes). With each Codefresh account you get access to a limited number of Docker environments that can host any Docker image or Docker compose file. + +First find your images in the [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images). + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/launch-docker-image.png" +url="/images/learn-by-example/php/launch-docker-image.png" +alt="Launching a Docker image" +caption="Launching a Docker image" +max-width="80%" +%} + +Click on the launch button and a new pipeline will run for deployment: + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/test-environment-url.png" +url="/images/learn-by-example/php/test-environment-url.png" +alt="Getting the environment url" +caption="Getting the environment url" +max-width="80%" +%} + +Notice that the pipeline logs show the dynamic URL of the application. Simply visit it with your browser +and you will see the result. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/php/test-environment.png" +url="/images/learn-by-example/php/test-environment.png" +alt="Application preview" +caption="Application preview" +max-width="80%" +%} + +Notice that these environments are only for testing and previewing your application as it is developed. They are **NOT** for production purposes. + + + +## Related articles + +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/ci-examples/populate-a-database-with-existing-data.md b/_docs/example-catalog/ci-examples/populate-a-database-with-existing-data.md new file mode 100644 index 00000000..14f27b14 --- /dev/null +++ b/_docs/example-catalog/ci-examples/populate-a-database-with-existing-data.md @@ -0,0 +1,153 @@ +--- +title: "Populate database with existing data" +description: "Preload test data before integration tests" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/populate-a-database-with-existing-data-copied/ +toc: true +old_url: /docs/populate-a-database-with-existing-data-copied +was_hidden: true +--- +In another example we saw how to run [integration tests with a database]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) such as PostgreSQL. Sometimes however, the integration tests require the database to already have some test data beforehand. 
With Codefresh you can use the [setup block]({{site.baseurl}}/docs/pipelines/service-containers/#preloading-data-to-databases) in service containers to preload data to a database. + + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/preload-data-to-db.png" +url="/images/examples/integration-tests/preload-data-to-db.png" +alt="Preloading test data to a DB" +caption="Preloading test data to a DB" +max-width="90%" +%} + +In this pipeline the database is populated with data from an SQL file. + +## Example PostgreSQL project + +You can see the example project at [https://github.com/codefresh-contrib/preload-db-integration-tests](https://github.com/codefresh-contrib/preload-db-integration-tests){:target="\_blank"}. The repository contains a simple integration test and an SQL file that inserts test data. + +The SQL file creates a single table in the database: + + `preload.sql` +{% highlight sql %} +{% raw %} +CREATE TABLE link ( + ID serial PRIMARY KEY, + url VARCHAR (255) NOT NULL, + name VARCHAR (255) NOT NULL, + description VARCHAR (255), + rel VARCHAR (50) +); + +INSERT INTO link (url, name) +VALUES + ('http://www.google.com','Google'), + ('http://www.azure.microsoft.com','Azure'), + ('http://www.codefresh.io','Codefresh'); +{% endraw %} +{% endhighlight %} + + +To work with the project locally, you need to have `docker`, `golang` and `postgres-client` installed on your workstation first. + +``` +$ docker run -p 5432:5432 postgres:11.5 +``` + +Then open another terminal and load the test data: + +``` +$ psql -h localhost -U postgres < testdata/preload.sql +``` + +A Postgres instance is now running at `localhost:5432` and you can run the tests with: + +``` +$ go test -v +``` + + +## Create a pipeline the preloads test data to PostgreSQL + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: +- prepare +- package +- test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefresh-contrib/preload-db-integration-tests" + revision: "master" + title: "Checking out source code" + git: github + stage: prepare + package_my_app: + stage: package + image: 'golang:1.13' + title: "Compile code" + commands: + - 'go build' + run_my_db_tests: + stage: test + image: 'golang:1.13' + title: "Running integration tests" + commands: + - 'go test -v' + environment: + - POSTGRES_HOST=my_postgresql_db + services: + composition: + my_postgresql_db: + image: postgres:11.5 + ports: + - 5432 + readiness: + timeoutSeconds: 30 + initialDelaySeconds: 10 + periodSeconds: 15 + image: 'postgres:11.5' + commands: + - "pg_isready -h my_postgresql_db -U postgres" + setup: + image: 'postgres:11.5' + commands: + - "psql -h my_postgresql_db -U postgres < /codefresh/volume/preload-db-integration-tests/testdata/preload.sql" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Compiles the code that runs `go build` through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Runs the tests while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) for an active PostgreSQL instance. Before tests are run, we launch another container with the `psql` executable to load database data. + + +> In this simple example, we use `psql` to preload the database. 
In a production application, you might instead use dedicated database migration tools such as [Liquibase](https://hub.docker.com/r/liquibase/liquibase){:target="\_blank"} or [Flyway](https://hub.docker.com/r/flyway/flyway){:target="\_blank"}, or any other command line tool that can communicate with your database.
+
+Notice that we also use the `readiness` property in the testing phase so that we can verify PostgreSQL is ready and listening before running the tests. The exact order of events is:
+
+1. Codefresh launches `postgres:11.5` on port 5432.
+1. It then launches another container in the same network with `pg_isready` in order to wait for the DB to be up.
+1. Then it launches a third container with `psql` to preload data.
+1. Finally, it launches a container with `golang:1.13` to run the actual tests.
+
+All containers are discarded after the pipeline has finished.
+
+## Related articles
+[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples)
+[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/)
+[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/)
+[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/)
+[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/)
+[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/)
+
+
+
diff --git a/_docs/example-catalog/ci-examples/publish-jar.md b/_docs/example-catalog/ci-examples/publish-jar.md
new file mode 100644
index 00000000..47add369
--- /dev/null
+++ b/_docs/example-catalog/ci-examples/publish-jar.md
@@ -0,0 +1,116 @@
+---
+title: "Publish Jar"
+description: "How to upload a JAR file to Nexus or Artifactory"
+excerpt: ""
+group: example-catalog
+sub_group: ci-examples
+toc: true
+---
+
+Even though Codefresh has great support for containers, it can still be used for traditional JAR uploads of libraries or applications that are not dockerized yet. In this example, we will compile a JAR file and upload it to Nexus. The process is the same for Artifactory or any other package manager.
+
+For a Java application with Docker, see the [Gradle example]({{site.baseurl}}/docs/learn-by-example/java/gradle/) or the [Maven example]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/).
+
+## The example Java library project
+
+You can see the example project at [https://github.com/codefresh-contrib/plain-jar-sample-lib](https://github.com/codefresh-contrib/plain-jar-sample-lib). The repository contains a simple Java library built with Maven with the following goals:
+
+* `mvn package` creates a JAR file of the library. It also runs unit tests.
+* `mvn deploy` uploads the JAR to a package manager such as Nexus or Artifactory.
+
+We use Nexus for this example. To upload the JAR manually, first edit the `pom.xml` with the URL of your package manager. The project also includes a [settings.xml](https://github.com/codefresh-contrib/plain-jar-sample-lib/blob/master/settings.xml) file with parameterized credentials.
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/nexus-browser.png" +url="/images/learn-by-example/java/nexus-browser.png" +alt="The Nexus package manager" +caption="The Nexus package manager" +max-width="80%" +%} + +From your workstation you can upload the jar manually with: + + +``` +mvn -s settings.xml -Dserver.password=my-nexus-user -Dserver.username=my-nexus-pass deploy +``` +If you then visit Nexus you should see your JAR file in the snapshots repository. + +## Create a CI pipeline for publishing a JAR file + +[Create a new pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) in Codefresh and define as parameters your Nexus credentials. You could also use [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) or any other credential mechanism you already use in your other pipelines. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/nexus-credentials.png" +url="/images/learn-by-example/java/nexus-credentials.png" +alt="Parameters for Nexus" +caption="Parameters for Nexus" +max-width="50%" +%} + +Then copy/paste the [Codefresh YAML file](https://github.com/codefresh-contrib/plain-jar-sample-lib/blob/master/codefresh.yml) in the pipeline editor. +Here are the full contents of the file: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/plain-jar-sample-lib' + revision: master + git: github + publish_jar: + title: Upload to nexus + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -s settings.xml -Dserver.password=${{NEXUS_PASS}} -Dserver.username=${{NEXUS_USER}} deploy +{% endraw %} +{% endhighlight %} + +The pipeline starts by checking out the code using a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). The next step is a [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) one and packages the jar file. We also use the [Codefresh volume for caching]({{site.baseurl}}/docs/pipelines/pipeline-caching/#traditional-build-caching). + +You can define the version of Maven/JDK you want to use by picking the appropriate image from Dockerhub, or using any of your own images (even from [external registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/)). + +Note the use of the two user-defined environment variables passed to `server.password` and `server.username`. You will need to define those yourself. See the documentation on [User Procided Variables]({{site.baseurl}}/docs/pipelines/variables/#user-provided-variables). +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/publish-jar-pipeline.png" +url="/images/learn-by-example/java/publish-jar-pipeline.png" +alt="Publish JAR pipeline" +caption="Publish JAR pipeline" +max-width="100%" +%} + +Once the pipeline has finished you should see the JAR file in the Nexus browser UI. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/nexus-upload.png" +url="/images/learn-by-example/java/nexus-upload.png" +alt="Upload finished" +caption="Upload finished" +max-width="70%" +%} + +You can use the same pipeline for Artifactory or any other compliant Java package registry. 
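+
+For example, here is a minimal sketch of what the upload step could look like when targeting Artifactory instead. The `ARTIFACTORY_USER` and `ARTIFACTORY_PASS` variable names are hypothetical; use whatever pipeline variables you have defined, and point the repository URL in `pom.xml` at your Artifactory instance:
+
+{% highlight yaml %}
+{% raw %}
+  publish_jar:
+    title: Upload to Artifactory
+    image: 'maven:3.5.2-jdk-8-alpine'
+    commands:
+      # Same Maven deploy command; only the credential variables (and the URL in pom.xml) change
+      - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -s settings.xml -Dserver.password=${{ARTIFACTORY_PASS}} -Dserver.username=${{ARTIFACTORY_USER}} deploy
+{% endraw %}
+{% endhighlight %}
+
+The rest of the pipeline (the clone step, the Maven cache location, and the way credentials are injected) stays exactly the same.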
+ + +## Related articles +[Gradle example]({{site.baseurl}}/docs/example-catalog/ci-examples/java/gradle/) +[Spring boot example]({{site.baseurl}}/docs//example-catalog/ci-examples/spring-boot-2/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) + + + + + + diff --git a/_docs/example-catalog/ci-examples/python.md b/_docs/example-catalog/ci-examples/python.md new file mode 100644 index 00000000..d80cb991 --- /dev/null +++ b/_docs/example-catalog/ci-examples/python.md @@ -0,0 +1,11 @@ +--- +title: "Python" +description: "" +group: example-catalog +redirect_from: + - /docs/python/ +toc: true +--- +This section contains Codefresh examples based on Python. +- [Voting app]({{ site.baseurl }}/docs/learn-by-example/python/voting-app/) +- [Django]({{ site.baseurl }}/docs/learn-by-example/python/django/) diff --git a/_docs/example-catalog/ci-examples/react.md b/_docs/example-catalog/ci-examples/react.md new file mode 100644 index 00000000..0cb0466e --- /dev/null +++ b/_docs/example-catalog/ci-examples/react.md @@ -0,0 +1,172 @@ +--- +title: "React example with Yarn" +description: "Create Docker images for React applications" +group: example-catalog +sub_group: nodejs +toc: true +--- + +Codefresh can work with React projects as with any [Node.js project]({{site.baseurl}}/docs/learn-by-example/nodejs/). + +## The example React project + +You can see the example project at [https://github.com/codefresh-contrib/react-sample-app](https://github.com/codefresh-contrib/react-sample-app){:target:"\_blank"}. The repository contains a React starter project with the following tasks: + +* `yarn test` runs unit tests. +* `yarn start` to start the application locally. +* `yarn build` to create a production deployment. + +Once launched the application presents a simple page at localhost:3000. + +## React and Docker (multi-stage builds) + +The easiest way to build a React.JS application is with [multi-stage builds](https://blog.docker.com/2017/07/multi-stage-builds/){:target:"\_blank"}. With multi-stage builds a Docker build can use one base image for packaging/unit tests and a different one that will hold the runtime of the application. This makes the final image more secure and smaller in size (as it does not contain any development/debugging tools). + +In the case of React, you can use a base image that has Node and all testing utilities, while the final image has your server (e.g. nginx) with the static content and nothing else. + +The example project is actually using multi-stage builds by default. + +Here is the multi-stage Dockerfile: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM node:8.16 as build-deps +WORKDIR /usr/src/app +COPY package.json yarn.lock ./ +RUN yarn +COPY . ./ +RUN yarn build + +FROM nginx:1.12-alpine +COPY --from=build-deps /usr/src/app/build /usr/share/nginx/html +EXPOSE 80 +CMD ["nginx", "-g", "daemon off;"] +{% endraw %} +{% endhighlight %} + +This docker build does the following: + +1. Starts from the Node/Yarn image +1. Copies the dependencies inside the container +1. Copies the source code and creates all static files +1. Discards the Node.js image with all the JavaScript libraries +1. 
Starts again from the nginx image and copies **static build result** created before + +The resulting is very small, as it contains only packaged/minified files. + +## Create a CI pipeline for React.js (Docker build) + +Creating a CI/CD pipeline for React is very easy, because Codefresh can run any [node image](https://hub.docker.com/_/node/){:target:"\_blank"} that you wish. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/nodejs/react-pipeline-docker.png" +url="/images/learn-by-example/nodejs/react-pipeline-docker.png" +alt="Creating a Docker image for react.js" +caption="Creating a Docker image for react.js" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/gradle-sample-app/blob/master/codefresh.yml){:target:"\_blank"} that creates the Docker image after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/react-sample-app' + revision: master + git: github + MyUnitTests: + title: Unit test + stage: test + image: node:8.16 + commands: + - yarn install + - yarn test + environment: + - CI=true + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: react-sample-app + working_directory: ./ + tag: 'with-nginx' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, runs unit tests and finally creates a Docker image. Codefresh is automatically caching +Docker layers (it uses the Docker image of a previous build as a cache for the next) and therefore builds will become +much faster after the first one finishes. + + +## Building a React.Js application without Docker + +If your application is not dockerized yet, you can still create a pipeline that runs any command that you would run locally. You can also choose which Node version is used for each step of the pipeline by defining a different docker image for each step. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/nodejs/react-pipeline-build.png" +url="/images/learn-by-example/nodejs/react-pipeline-build.png" +alt="Building a Reach.js application" +caption="Building a Reach.js application" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/react-sample-app/blob/master/codefresh-only-build.yml){:target:"\_blank"} that creates a production deployment of all files. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/react-sample-app' + revision: master + git: github + MyUnitTests: + title: Unit test + stage: test + image: node:11.0 + commands: + - yarn install + - yarn test + environment: + - CI=true + MyReactBuild: + title: Packaging application + stage: build + image: node:8.16 + commands: + - yarn build +{% endraw %} +{% endhighlight %} + +Notice that for demonstration purposes we uses node 11 for the tests, and node 8 for the packaging. Normally you should use the same version of node/Yarn for all your steps, but Codefresh pipelines are flexible on version of tools. + +Even when you don't create a Docker image, Codefresh still caches your workspace volume. This means that `node_modules` are downloaded only once. All subsequent builds will be much faster. 
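+
+If you also want to persist Yarn's own package cache between builds (in addition to `node_modules`), one possible tweak, not part of the example repository, is to point the standard `YARN_CACHE_FOLDER` environment variable at the shared Codefresh volume. Here is a sketch of the unit test step with this change:
+
+{% highlight yaml %}
+{% raw %}
+  MyUnitTests:
+    title: Unit test
+    stage: test
+    image: node:11.0
+    environment:
+      - CI=true
+      # Hypothetical cache location; any path under /codefresh/volume persists between builds
+      - YARN_CACHE_FOLDER=/codefresh/volume/yarn-cache
+    commands:
+      - yarn install
+      - yarn test
+{% endraw %}
+{% endhighlight %}
+
+This way, subsequent `yarn install` runs can reuse previously downloaded packages instead of fetching them again.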
+ +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/ruby.md b/_docs/example-catalog/ci-examples/ruby.md new file mode 100644 index 00000000..0758068e --- /dev/null +++ b/_docs/example-catalog/ci-examples/ruby.md @@ -0,0 +1,183 @@ +--- +title: "Ruby" +description: "How to build a Ruby On Rails project in Codefresh" +group: example-catalog +sub_group: ci-examples +toc: true +--- +Ruby on Rails is a very popular development framework that combines ease of use and a great amount of programming languages. In Codefresh, ROR projects behave like any other web application. You can easily build them, run [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) and launch them on [demo environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/). + +The example application is located at [https://github.com/codefresh-contrib/ruby-on-rails-sample-app](https://github.com/codefresh-contrib/ruby-on-rails-sample-app){:target:"\_blank"}. + + + +## Dockerize your Ruby on Rails project + +The first step should be to write a [Dockerfile](https://github.com/codefresh-contrib/ruby-on-rails-sample-app/blob/master/Dockerfile){:target:"\_blank"} for your Rails project. As an example we will use the following: + + + +`Dockerfile` +{% highlight docker %} +FROM ruby:2.3.1-slim + +RUN apt-get update && \ + apt-get install -y build-essential libcurl4-openssl-dev libxml2-dev libsqlite3-dev libpq-dev nodejs postgresql-client sqlite3 --no-install-recommends && \ + apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# throw errors if Gemfile has been modified since Gemfile.lock +RUN bundle config --global frozen 1 + +ENV APP_PATH /usr/src/app + +RUN mkdir -p $APP_PATH + +COPY Gemfile $APP_PATH +COPY Gemfile.lock $APP_PATH + +WORKDIR $APP_PATH + +RUN bundle install + +COPY . $APP_PATH + +ENV RAILS_ENV development + +RUN bin/rake db:migrate + +RUN bin/rake assets:precompile + +EXPOSE 3000 + +CMD ["bundle", "exec", "rails", "server", "-b", "0.0.0.0"] + +{% endhighlight %} + +Notice the order of commands and especially the fact that we copy the `Gemfile` on its own first, so that we take advantage of the Docker layer caching. + +>Codefresh also supports multi-stage docker builds. You can use one parent docker image for preparing your gem modules and another one for actually deployment the application. + +Once you have a Dockerfile, [creating a pipeline in Codefresh]({{site.baseurl}}/docs/pipelines/pipelines/) is very easy either from the GUI or with the yaml syntax. + +## Simple pipeline with Docker image and unit tests + +A very simple pipeline is one that has only two steps: + +1. Build the docker image +1. Run the tests inside the docker image that was just build + +Here is the example [codefresh.yml](https://github.com/codefresh-contrib/ruby-on-rails-sample-app/blob/master/codefresh.yml){:target:"\_blank"} file. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'codefresh-contrib/ruby-on-rails-sample-app' + revision: master + git: github + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: ruby-on-rails-sample-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + RunningUnitTests: + title: Running Unit Tests + image: '${{BuildingDockerImage}}' + commands: + - rails db:migrate + - rails test +{% endraw %} +{% endhighlight %} + +The first step is a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) named `BuildingDockerImage`. It reads the Dockerfile and creates a Docker image out of it. The second step is a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) called `RunningUnitTests`. It uses the image mentioned in the first step and executes custom commands inside it. + + +## Inspecting your Docker image + +You can see all your latest [Docker artifacts]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images) by selecting *Images* from the left sidebar. + + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/images.png" +url="/images/learn-by-example/ruby/images.png" +alt="Codefresh built-in Registry" +caption="Codefresh built-in Registry" +max-width="80%" +%} + +You can click on the image and get extra details. One of the tabs contains a visual explanation of the layers contained in the image. This view can be helpful when you are trying to make your Docker images smaller (which is a recommended practice) + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/layers.png" +url="/images/learn-by-example/ruby/layers.png" +alt="Ruby On Rails image filesystem layers" +caption="Ruby On Rails Image filesystem layers" +max-width="70%" +%} + +In Codefresh you can also use any other [external registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) such as Dockerhub, Azure, Google etc. + + +## Previewing the Ruby on Rails application in a Demo environment + +Codefresh has the unique capability of launching Docker images within its infrastructure for a quick demonstration (e.g. to customers and colleagues). + +In the example Rails repository, the default development "environment" is self-contained (it uses sqlite for a database). This makes it very easy to preview. + +Launch the environment by clicking at the rocket icon in the images view. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/launch.png" +url="/images/learn-by-example/ruby/launch.png" +alt="Launching a demo environment" +caption="Launching a demo environment" +max-width="50%" +%} + +A new build will start. Once it is complete your new environment will be created. You can inspect it by clicking in the *Compositions* menu on the left sidebar and then clicking *Running Compositions*. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/environment.png" +url="/images/learn-by-example/ruby/environment.png" +alt="Inspecting a demo environment" +caption="Inspecting a demo environment" +max-width="70%" +%} + +Click the *Open App* icon on the right and your browser will open a new tab with the environment. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/ruby/preview.png" +url="/images/learn-by-example/ruby/preview.png" +alt="Previewing a demo environment" +caption="Previewing a demo environment" +max-width="50%" +%} + + +You can share this link with other people in your team. 
+ +>Demo environments are not intended for production purposes. Use them only for quick feedback. They also shutdown automatically after a period of inactivity. + + + +## Related articles +[Introduction to Pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[On demand environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) + + + diff --git a/_docs/example-catalog/ci-examples/run-integration-tests.md b/_docs/example-catalog/ci-examples/run-integration-tests.md new file mode 100644 index 00000000..9bbbbdc0 --- /dev/null +++ b/_docs/example-catalog/ci-examples/run-integration-tests.md @@ -0,0 +1,102 @@ +--- +title: "Run integration tests" +description: "Launch separate App and test containers" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/run-integration-tests/ +toc: true +--- +In this example, we will see a Java/Tomcat project using JUnit for unit tests and Spock for integration tests. For the integration test phase, we will launch both the application and the tests in order to run the integration tests against a real web instance (without mocking). + +{% include image.html +lightbox="true" +file="/images/examples/integration-tests/integration-tests.png" +url="/images/examples/integration-tests/integration-tests.png" +alt="Integration tests with Codefresh" +caption="Integration tests with Codefresh" +max-width="90%" +%} + +The integration tests will look at the application instance at `app:8080`. + +## Example Java/Tomcat/Spring project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-integration-tests](https://github.com/codefreshdemo/cf-example-integration-tests){:target:"\_blank"}. The repository contains the Java source code and some integration tests. + +You can play with it locally by using Docker compose to launch both the application and the tests. + +## Create a pipeline with separate integration tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - prepare + - build + - test +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "codefreshdemo/cf-example-integration-tests" + revision: "master" + git: github + stage: prepare + build_app_image: + title: "Building Docker Image" + type: "build" + image_name: "my-spring-app" + tag: "master" + dockerfile: "Dockerfile" + stage: build + build_test_image: + title: "Building Docker Test Image" + type: "build" + image_name: "my-junit-spock-tests" + tag: "master" + dockerfile: "Dockerfile.testing" + stage: test + run_integration_tests: + title: "Running integration tests" + stage: test + image: '${{build_test_image}}' + commands: + # Tomcat is certainly up at this point + - mvn verify -Dserver.host=app + services: + composition: + app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://app:8080/wizard/" + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a Docker image with only Tomcat and the application WAR through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. 
Builds a helper image that contains the source code and Maven to run integration tests. +1. Runs the `mvn verify` command in the helper image while launching a [service container]({{site.baseurl}}/docs/pipelines/service-containers/) with the Tomcat/Java image. + +Notice that we also use the `readiness` property in the testing phase to verify that the application +is actually up, before running the tests. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/) +[Integration Tests with Postgres]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +[Integration Tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +[Integration Tests with Mongo]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +[Integration Tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/run-unit-tests.md b/_docs/example-catalog/ci-examples/run-unit-tests.md new file mode 100644 index 00000000..360da67e --- /dev/null +++ b/_docs/example-catalog/ci-examples/run-unit-tests.md @@ -0,0 +1,106 @@ +--- +title: "Run unit tests" +description: "Running unit tests in Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/run-unit-tests/ +toc: true +--- + +As explained in [unit tests]({{site.baseurl}}/docs/testing/unit-tests/), Codefresh supports several ways of running unit tests. The most common scenarios use an existing Docker Hub image (common with compiled languages such as Java and Go), or the application image itself (common with languages such as JavaScript/Python/Ruby/PHP). + +In this example, we will see both ways using two different applications in a single pipeline. + +{% include image.html +lightbox="true" +file="/images/examples/unit-tests/unit-tests-pipeline.png" +url="/images/examples/unit-tests/unit-tests-pipeline.png" +alt="Unit tests with Codefresh" +caption="Unit tests with Codefresh" +max-width="90%" +%} + +In the first case, we run unit tests *before* creating the application docker image. In the second case, we run the unit tests +*inside* the application Docker image. + +## Example Python/Go project + +You can see the example project at [https://github.com/codefreshdemo/cf-example-unit-test](https://github.com/codefreshdemo/cf-example-unit-test){:target="\_blank"}. The repository contains two applications (Python and Go) with their respective unit tests. + +You can play with it locally by using Docker commands to package the applications. + +## Create a pipeline with unit tests + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - 'Microservice A' + - 'Microservice B' +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'codefreshdemo/cf-example-unit-test' + revision: 'master' + git: github + stage: prepare + run_my_tests_before_build: + title: Running Unit tests directly + stage: 'Microservice A' + image: golang:1.12 + working_directory: './golang-app-A' + commands: + - go test -v + build_after_my_tests: + title: Building Go Docker Image + type: build + stage: 'Microservice A' + image_name: my-go-image + working_directory: './golang-app-A' + tag: 'master' + dockerfile: Dockerfile + build_before_my_tests: + title: Building Python Docker Image + type: build + stage: 'Microservice B' + image_name: my-python-image + working_directory: './python-app-B' + tag: 'master' + dockerfile: Dockerfile + run_my_tests_inside_image: + title: Running Unit tests inside App image + stage: 'Microservice B' + image: ${{build_before_my_tests}} + working_directory: '/app' + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Runs unit test for the GO application using the Dockerhub image `golang:1.12`. +1. Builds the Docker image for the Go application through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Builds the Docker image for the Python application. +1. Runs unit tests for the Python application using as runtime context the application image that was just created. + + +In the second case, the tests run in the context of `build_before_my_tests` which is the name of the step that creates the Docker image for Python. Read more about [context variables]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables). + +We generally recommend the first approach, so that your production Docker image does not contain any unit testing libraries or frameworks, but there is no right or wrong choice regarding the way you run unit tests. + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration test example]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/) +[Freestyle step]({{site.baseurl}}/docs/pipelines/steps/) + + diff --git a/_docs/example-catalog/ci-examples/rust.md b/_docs/example-catalog/ci-examples/rust.md new file mode 100644 index 00000000..1efd443b --- /dev/null +++ b/_docs/example-catalog/ci-examples/rust.md @@ -0,0 +1,84 @@ +--- +title: "Compile and test a Rust application" +description: "Using Codefresh pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh can work with any Rust application very easily as both `rustc` and `cargo` are already offered in Dockerhub. + +## The example Rust project + +You can see the example project at [https://github.com/codefresh-contrib/rust-sample-app](https://github.com/codefresh-contrib/rust-sample-app){:target="\_blank"}. The repository contains a Rust starter project with a dummy unit test. + +* `cargo build` compiles the code. +* `cargo test` runs unit tests +* `cargo clean` removes artifacts and binaries. + + +## Create a CI pipeline for Rust applications + +Creating a CI/CD pipeline for Rust is very easy, because Codefresh can run any [Rust image](https://hub.docker.com/_/rust){:target="\_blank"} that you wish. Rust docker images already contain the `cargo` package manager. 
+ +{% include image.html +lightbox="true" +file="/images/learn-by-example/rust/rust-pipeline.png" +url="/images/learn-by-example/rust/rust-pipeline.png" +alt="Compiling a Rust application in a pipeline" +caption="Compiling a Rust application in a pipeline" +max-width="80%" +%} + +Here is the [full pipeline](https://github.com/codefresh-contrib/rust-sample-app/blob/master/codefresh.yml){:target="\_blank"} that compiles the application after checking out the code. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "test" +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefresh-contrib/rust-sample-app" + revision: "master" + stage: "clone" + compile: + title: "Building Code" + type: "freestyle" + image: "rust:1.44-stretch" + working_directory: "${{clone}}" + environment: + - CARGO_HOME=/codefresh/volume/cargo + commands: + - "cargo build" + stage: "build" + test: + title: "Running tests" + type: "freestyle" + image: "rust:1.44-stretch" + working_directory: "${{clone}}" + environment: + - CARGO_HOME=/codefresh/volume/cargo + commands: + - "cargo test" + stage: "test" + +{% endraw %} +{% endhighlight %} + +This pipeline clones the source code, compiles the code and runs unit tests. In all cases we use the public Docker image of Rust that also contains `cargo`. + +We also pass the `CARGO_HOME` environment variable to place the Cargo cache on the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). See the [Caching documentation]({{site.baseurl}}/docs/pipelines/pipeline-caching/#traditional-build-caching) for more details. + + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) \ No newline at end of file diff --git a/_docs/example-catalog/ci-examples/scala-hello-world.md b/_docs/example-catalog/ci-examples/scala-hello-world.md new file mode 100644 index 00000000..66681d4a --- /dev/null +++ b/_docs/example-catalog/ci-examples/scala-hello-world.md @@ -0,0 +1,184 @@ +--- +title: "Scala: Hello World" +description: "Use Scala and Codefresh to clone, package, and build a Docker image" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/scala-hello-world/ +toc: true +--- + +So, you’ve decided to try Codefresh? Welcome on board! + +We’ll help you get up to speed with basic functionality such as: compiling, building Docker images and launching. + +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) + +## The Example Scala Application + +This project uses `Scala` to build an application which will eventually become a distributable Docker image. + +You can find the example application on [GitHub](https://github.com/codefresh-contrib/scala-hello-world-app){:target="\_blank"}. + +There are two pipeline examples provided in this tutorial: + +- Multi-stage Docker build +- Single stage Docker Build + +## Example Pipeline #1: Single stage Docker Build + +This example uses a single stage Docker build. 
The pipeline will have three stages: + +- A stage for cloning +- A stage for packaging +- A stage for building + +{% include image.html +lightbox="true" +file="/images/examples/scala/single-stage-pipeline.png" +url="/images/examples/scala/single-stage-pipeline.png" +alt="Codefresh UI pipeline view" +caption="Codefresh UI pipeline view" +max-width="100%" +%} + +Here is the Dockerfile used for this example: + +`Dockerfile-single-stage` +```shell +FROM openjdk:8-jre-alpine3.9 + +COPY . . + +CMD ["java", "-cp", "target/scala-2.12/*.jar:scala-library-2.12.2.jar", "HelloWorld"] +``` + +And here is the pipeline. You can copy and paste it in the inline YAML editor in the UI: + + `codefresh-single-stage.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - clone + - package + - build + +steps: + clone: + title: Cloning repository... + type: git-clone + stage: clone + arguments: + repo: codefresh-contrib/scala-hello-world-app + revision: master + package: + title: Packaging application... + type: freestyle + stage: package + working_directory: ./scala-hello-world-app + arguments: + image: hseeberger/scala-sbt:11.0.6_1.3.9_2.13.1 + commands: + - sbt -Dsbt.ivy.home=/codefresh/volume/ivy_cache clean compile package + - cp /codefresh/volume/ivy_cache/cache/org.scala-lang/scala-library/jars/scala-library-2.12.2.jar . + build_image: + title: Building Docker image... + type: build + working_directory: ${{clone}} + stage: build + arguments: + image_name: codefresh/scala-sample-app + tag: 1.0.0 + dockerfile: Dockerfile-single-stage +{% endraw %} +{% endhighlight %} + +The above pipeline does the following: + +1. A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step that clones the main repository +2. A [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that uses an SBT image that packages the application (note how `sbt.ivy.home` is set to an arbitrarily named directory that is part of the codefresh volume). This ensures we cache dependencies to [speed up builds]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/#caching-the-maven-dependencies), similar to Maven. +3. The last step, `build_image`, is a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) that builds a Docker image using the Dockerfile provided in the repository. + +## Example Pipeline #2: Multi-stage Docker Build + +This example uses a multi stage Docker build. 
The pipeline will have only two stages this time, as packaging of the app is handled in the Dockerfile itself: + +- A stage for cloning +- A stage for building + +{% include image.html +lightbox="true" +file="/images/examples/scala/multi-stage-pipeline.png" +url="/images/examples/scala/multi-stage-pipeline.png" +alt="Codefresh UI pipeline view" +caption="Codefresh UI pipeline view" +max-width="100%" +%} + +Here, you will find the multi-stage Dockerfile, copying over only the jars we need: + +`Dockerfile-multi-stage` + +```shell +# first stage + +FROM hseeberger/scala-sbt:11.0.6_1.3.9_2.13.1 AS build + +COPY ./ ./ + +RUN sbt compile clean package + +# second stage + +FROM openjdk:8-jre-alpine3.9 + +COPY --from=build /root/target/scala-2.12/*.jar /scala-hello-world-sample-app.jar +COPY --from=build /root/.ivy2/cache/org.scala-lang/scala-library/jars/scala-library-2.12.2.jar /scala-library-2.12.2.jar + +CMD ["java", "-cp", "scala-hello-world-sample-app.jar:scala-library-2.12.2.jar", "HelloWorld"] +``` +Here is the pipeline, you can copy and paste it into the inline YAML editor: + +`codefresh-multi-stage.yml` + +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - clone + - build + +steps: + clone: + title: Cloning repository... + type: git-clone + stage: clone + arguments: + repo: codefresh-contrib/scala-hello-world-app + revision: master + build_image: + title: Building Docker image... + type: build + working_directory: ${{clone}} + stage: build + arguments: + image_name: codefresh/scala-hello-world-app + tag: 1.0.0 + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +1. A [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step that clones the main repository +2. A [build step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that builds our code into a Docker image using the Dockerfile present in the repository + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Freestyle Step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + diff --git a/_docs/example-catalog/ci-examples/scala.md b/_docs/example-catalog/ci-examples/scala.md new file mode 100644 index 00000000..7415259d --- /dev/null +++ b/_docs/example-catalog/ci-examples/scala.md @@ -0,0 +1,10 @@ +--- +title: "Scala" +description: "" +group: example-catalog +redirect_from: + - /docs/scala/ +toc: true +--- +This section contains Codefresh examples based on Scala. +- [Scala: Hello World]({{site.baseurl}}/docs/learn-by-example/scala/scala-hello-world/) diff --git a/_docs/example-catalog/ci-examples/sending-the-notification-to-jira.md b/_docs/example-catalog/ci-examples/sending-the-notification-to-jira.md new file mode 100644 index 00000000..2d024509 --- /dev/null +++ b/_docs/example-catalog/ci-examples/sending-the-notification-to-jira.md @@ -0,0 +1,88 @@ +--- +title: "Send notification to Jira" +description: "" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +The plugin marketplace offers several freestyle steps for your Codefresh pipeline. + +One of those steps is the [Jira Issue Manager](https://codefresh.io/steps/step/jira-issue-manager){:target:"\_blank"}. + +## Prerequisites +* [Codefresh pipeline]({{site.baseurl}}/docs/getting-started/create-a-basic-pipeline/) +* [Jira account](https://www.atlassian.com/software/jira){:target:"\_blank"} + +## Example +This documentation uses the following [example](https://github.com/codefresh-contrib/jira-demo-app){:target:"\_blank"}. 
You can either use the provided example to try out the Jira integration, or follow along with your own application.
+
+1. You need an issue in your Jira account that you want to link to your Codefresh pipeline. If you do not have one yet, please create an issue. (The project type, who creates the issue, and so on, do not matter.) Alternatively, you can also create an issue with the Jira step itself, but this is not covered in this example.
+
+2. Next, add the following step to your Codefresh pipeline. If you are using the example, the [codefresh.yml](https://github.com/codefresh-contrib/jira-demo-app/blob/master/codefresh.yml){:target="\_blank"} file is already added.
+
+{% highlight yaml %}
+{% raw %}
+  JiraCommentCreate:
+    title: "Add Jira Comment"
+    type: "jira-issue-manager"
+    stage: "deploy"
+    arguments:
+      JIRA_BASE_URL: '${{JIRA_BASE_URL}}'
+      JIRA_USERNAME: '${{JIRA_USERNAME}}'
+      JIRA_API_KEY: '${{JIRA_API_KEY}}'
+      JIRA_ISSUE_SOURCE_FIELD: '${{JIRA_ISSUE_SOURCE_FIELD}}'
+      ACTION: "comment_create"
+      COMMENT_BODY: "Build number ${{CF_BUILD_URL}} finished in Codefresh"
+{% endraw %}
+{% endhighlight %}
+
+Let's look at this step in detail. Everything up to the `arguments` section is similar to other Codefresh steps.
+
+These arguments are required to use the step:
+- `JIRA_BASE_URL`: The URL of your organization, for example `https://company-name.atlassian.net`.
+- `JIRA_USERNAME`: Usually the e-mail address that you log in to Jira with.
+- `JIRA_API_KEY`: You will have to create this key. The official [Atlassian documentation](https://confluence.atlassian.com/cloud/api-tokens-938839638.html){:target="\_blank"} details how to create it.
+
+Then we added these arguments for our specific step:
+- `JIRA_ISSUE_SOURCE_FIELD`: The tag that identifies your issue, for example `MKTG-102`.
+- Within the comment, we use the [Codefresh native variable]({{site.baseurl}}/docs/pipelines/variables/) `CF_BUILD_URL`, which references your pipeline build and lets you jump back to the pipeline from the Jira comment.
+
+All variables use the Codefresh-specific variable notation {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %}.
+
+Since the step above uses a new stage (`deploy`), remember to add it to the `stages` block at the top of your pipeline, e.g.:
+
+{% highlight yaml %}
+  stages:
+    - "clone"
+    - "build"
+    - "deploy"
+{% endhighlight %}
+
+Note that you can [provide the variables]({{site.baseurl}}/docs/pipelines/shared-configuration/) needed for the Jira step through the shared configuration. The benefits are:
+* You do not have to put sensitive information, such as the API key, directly in `codefresh.yml`.
+* If you use the same step across multiple pipelines, you do not have to copy-paste the same variables.
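+
+As an optional variation, the comment body can combine several native variables. The following sketch reuses the same arguments as above and only changes `COMMENT_BODY`, adding `CF_BRANCH` and `CF_SHORT_REVISION`:
+
+{% highlight yaml %}
+{% raw %}
+  JiraCommentCreate:
+    title: "Add Jira Comment"
+    type: "jira-issue-manager"
+    stage: "deploy"
+    arguments:
+      JIRA_BASE_URL: '${{JIRA_BASE_URL}}'
+      JIRA_USERNAME: '${{JIRA_USERNAME}}'
+      JIRA_API_KEY: '${{JIRA_API_KEY}}'
+      JIRA_ISSUE_SOURCE_FIELD: '${{JIRA_ISSUE_SOURCE_FIELD}}'
+      ACTION: "comment_create"
+      # CF_BRANCH and CF_SHORT_REVISION are standard Codefresh system variables
+      COMMENT_BODY: "Branch ${{CF_BRANCH}} (commit ${{CF_SHORT_REVISION}}) finished building in Codefresh: ${{CF_BUILD_URL}}"
+{% endraw %}
+{% endhighlight %}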
+ +Once you run the pipeline, you should be able to see the following output or similar: + +{% include image.html +lightbox="true" +file="/images/integrations/jira/codefreshpipeline.png" +url="/images/integrations/jira/codefreshpipeline.png" +alt="Pipeline with Jira integration" +max-width="80%" +%} + +And the comment, including the URL to the pipeline, should be added to your Jira issue: + +{% include image.html +lightbox="true" +file="/images/integrations/jira/jira-comment.png" +url="/images/integrations/jira/jira-comment.png" +alt="Comment in Jira" +max-width="80%" +%} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) +[Sending notifications to Slack]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-slack/) +[Create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) diff --git a/_docs/example-catalog/ci-examples/sending-the-notification-to-slack.md b/_docs/example-catalog/ci-examples/sending-the-notification-to-slack.md new file mode 100644 index 00000000..1af32946 --- /dev/null +++ b/_docs/example-catalog/ci-examples/sending-the-notification-to-slack.md @@ -0,0 +1,44 @@ +--- +title: "Send notification to Slack" +description: "Connect your Codefresh pipelines to Slack" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/sending-the-notification-to-slack/ +toc: true +--- + +There are many ways to integrate Slack with Codefresh: + +1. Use the [global slack integration]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) +1. Use individual pipeline plugins such [slack-message-sender](https://codefresh.io/steps/step/slack-message-sender){:target:"\_blank"} and [slack-notifier](https://codefresh.io/steps/step/slack-notifier){:target:"\_blank"} +1. Use simple POST requests with Curl, as explained in this article + +## Custom webhook to Slack + +Use a container image with a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) such as `byrnedo/alpine-curl` to send a notification to a Slack channel. + +{:start="1"} +1. Get the {% raw %}```${{SLACK_WEB_URL}}```{% endraw %} and put it in the Environment Variables or use [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/). + + > To integrate with Slack, see [https://api.slack.com/incoming-webhooks](https://api.slack.com/incoming-webhooks){:target="_blank"}. + +{:start="2"} +2. 
Add the following step to your `codefresh.yml`: + + `slack step` +{% highlight yaml %} +slack_notify: + image: byrnedo/alpine-curl # curlimages/curl, or any other curl image + commands: + - curl -X POST --data-urlencode 'payload={"text":"Test slack integration via yaml"}' {% raw %}${{SLACK_WEB_URL}}{% endraw %} +{% endhighlight %} + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[Global Slack Integration]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) +[Advanced Workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +[Hooks in pipelines]({{site.baseurl}}/docs/pipelines/hooks/) +[Shared Configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) + diff --git a/_docs/example-catalog/ci-examples/shared-volumes-between-builds.md b/_docs/example-catalog/ci-examples/shared-volumes-between-builds.md new file mode 100644 index 00000000..99db466d --- /dev/null +++ b/_docs/example-catalog/ci-examples/shared-volumes-between-builds.md @@ -0,0 +1,115 @@ +--- +title: "Share data between pipeline steps" +description: "How to cache folders between steps and builds" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/shared-volumes-between-builds/ +toc: true +--- + +Codefresh creates a [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) in each pipeline that is automatically shared with all freestyle steps. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume.png" +url="/images/pipeline/introduction/codefresh-volume.png" +alt="Codefresh volume" +caption="All steps share the same volume" +max-width="90%" +%} + +This volume exists at `/codefresh/volume` by default. Simply copy files there to have them available to all Codefresh steps (as well as subsequent builds of the same pipeline). + +>The [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) deletes any files **not** specified in `.gitignore`. To cache a folder that exists in your project directory (such as `node_modules`), you must also add it to `.gitignore` + +## Using the shared volume + +You can see the example project at [https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds](https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds){:target="\_blank"}. The repository contains a simple application, a Dockerfile, and an example pipeline that saves/reads a dummy file to the Codefresh volume. 
+ + +Here is the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +stages: + - "clone" + - "build" + - "shared-volume" + +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "codefreshdemo/cf-example-shared-volumes-between-builds" + revision: "master" + stage: "clone" + + build_image: + title: "Building image" + type: "build" + image_name: "sample-app" + working_directory: "${{clone}}" + tag: "demo" + dockerfile: "Dockerfile" + stage: "build" + + copy_to_shared_volume: + title: "Copy file to shared volume" + type: "freestyle" + image: alpine:3.9 + working_directory: "${{clone}}" + commands: + - ls -l /codefresh/volume/ + - cp ./artifact/artifact.example /codefresh/volume/artifact.example + stage: "shared-volume" + + list_shared_volume: + title: "List shared volume files" + type: "freestyle" + image: alpine:3.9 + working_directory: "${{clone}}" + commands: + - pwd + - ls -l /codefresh/volume + stage: "shared-volume" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +1. Builds a docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +1. Copies the file `artifact.example` to the volume through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +1. Reads the contents of the volume through a different freestyle step. + +If you run the pipeline, you will see the file contents in the fourth step: + +{% include +image.html +lightbox="true" +file="/images/examples/shared-workspace/volume-list.png" +url="/images/examples/shared-workspace/volume-list.png" +alt="Listing volume contents" +caption="Listing volume contents" +max-width="80%" +%} + + +If you run the pipeline a second time, you will see the dummy file in all steps, as the volume is automatically cached for subsequent builds as well. + + +## Caching build dependencies and Docker layers + +Read more about caching build dependencies in [caching in pipelines]({{site.baseurl}}/docs/pipelines/pipeline-caching/), and in this [blog post](https://codefresh.io/blog/caching-build-dependencies-codefresh-volumes/){:target:"\_blank"}. + + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/examples/#ci-examples) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle) diff --git a/_docs/example-catalog/ci-examples/shared-volumes-of-service-from-composition-step-for-other-yml-steps.md b/_docs/example-catalog/ci-examples/shared-volumes-of-service-from-composition-step-for-other-yml-steps.md new file mode 100644 index 00000000..d99b74cf --- /dev/null +++ b/_docs/example-catalog/ci-examples/shared-volumes-of-service-from-composition-step-for-other-yml-steps.md @@ -0,0 +1,45 @@ +--- +title: "Share service volumes in composition steps" +description: "How to share data in compositions" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/shared-volumes-of-service-from-composition-step-for-other-yml-steps/ +toc: true +--- +Using this repository, we'll help you get up to speed with basic functionality such as building Docker images and using the shared volumes. + +This project uses Node Js to build an application which will eventually become a distributable Docker image. 
+To share volumes of service in composition step for other yml steps you can use the variable {% raw %}```${{CF_VOLUME_NAME}}```{% endraw %}. It will refer to the volume that was generated for the specific flow. Can be used in conjunction with a composition to provide access to your cloned repository. + +>Read more about caching build dependencies our [blog](https://codefresh.io/blog/caching-build-dependencies-codefresh-volumes/){:target="_blank"}. + +## Looking around +In the root of this repository you'll find a file named `codefresh.yml`, this is our build descriptor that describes the different steps that comprise our process. Let's quickly review the contents of this file: + + `codefresh.yml` +{% highlight yaml %} +step_file_generation: + type: composition + composition: + version: '2' + services: + service1: + volumes: + - {% raw %}${{CF_VOLUME_NAME}}{% endraw %}:/codefresh/volume + image: {% raw %}${{build_step}}{% endraw %} + command: bash -c "echo hello > /codefresh/volume/myfile.txt" + composition_candidates: + test: + image: {% raw %}${{build_step}}{% endraw %} + command: echo hello +{% endhighlight %} + +>Example + Just head over to the example [**repository**](https://github.com/codefreshdemo/cf-example-shared-volumes-composition-step){:target="_blank"} in GitHub, and follow the instructions there. + +The way the volume is shared between builds is that upon build completion we create an image of the volume state to be used in the next builds. If you run 2 builds in parallel from the same pipeline and at the same time, each will use the same last volume image, but it’ll run separately on both. The volume image you’ll get upon completion is the state of the build that finished last. + + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) diff --git a/_docs/example-catalog/ci-examples/spring-boot-2.md b/_docs/example-catalog/ci-examples/spring-boot-2.md new file mode 100644 index 00000000..37230e51 --- /dev/null +++ b/_docs/example-catalog/ci-examples/spring-boot-2.md @@ -0,0 +1,252 @@ +--- +title: "Spring Boot 2/Maven" +description: "Create Docker images for Spring/Maven" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/spring-boot-2/ + - /docs/java/spring-boot-2/ +toc: true +--- + +Spring Boot is quickly becoming a very popular choice for building Java back-end applications. Compared to traditional application servers,it is a bit different since it includes a servlet container in the final JAR file allowing +for self-contained Java Archives (JAR files). + +Codefresh can easily handle Spring Boot applications that are dockerized either in the traditional way or using multi-stage builds. + +## The example Java project + +You can see the example project at [https://github.com/codefresh-contrib/spring-boot-2-sample-app](https://github.com/codefresh-contrib/spring-boot-2-sample-app){:target="\_blank"}. The repository contains a Spring Boot 2 project built with Maven with the following goals: + +* `mvn package` creates a jar file that can be run on its own (exposes port 8080). It also runs unit tests. +* `mvn verify` runs integration tests as well. The application is launched locally as part of the Maven lifecycle. + +Once launched the application presents a simple message at localhost:8080 and also at the various `/actuator/health` endpoints. You can use the standard `spring-boot:run` command to run it locally (without Docker). 
+ +## Spring Boot 2 and Docker (package only) + +A Dockerfile is also provided at the same repository. It uses the base JRE image and just copies the JAR file inside the container. + + `Dockerfile.only-package` +{% highlight docker %} +{% raw %} +FROM java:8-jre-alpine + +EXPOSE 8080 + +RUN mkdir /app +COPY target/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] + +HEALTHCHECK --interval=1m --timeout=3s CMD wget -q -T 3 -s http://localhost:8080/actuator/health/ || exit 1 + +{% endraw %} +{% endhighlight %} + +This means that _before_ building the Docker image, the compilation step (`mvn package`) is expected to be finished already. Therefore, in the `codefresh.yml` file we need at least two steps. The first one should prepare the JAR file and the second +one should create the Docker image. + +### Create a CI pipeline for Spring + +The repository also contains a premade [Codefresh YAML file]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) that you can use as a starting point in your own Spring Boot 2 projects. + +Here are the full contents of the file. + + `codefresh-package-only.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build + - 'integration test' +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + run_unit_tests: + title: Compile/Unit test + stage: test + image: 'maven:3.5.2-jdk-8-alpine' + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'non-multi-stage' + dockerfile: Dockerfile.only-package + run_integration_tests: + title: Integration test + stage: 'integration test' + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://my-spring-app + services: + composition: + my-spring-app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://my-spring-app:8080/" +{% endraw %} +{% endhighlight %} + +The pipeline starts by checking out the code using a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). The next step is a [freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) one and packages the jar file. Next we have a [build step]({{site.baseurl}}/docs/pipelines/steps/build/) that creates the docker image. Finally we have another freestyle +step that uses [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) to run integration tests. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/spring-boot-steps.png" +url="/images/learn-by-example/java/spring-boot-steps.png" +alt="Spring boot pipeline" +caption="Spring boot pipeline" +max-width="80%" +%} + +After checking out the code we use the standard [Maven Docker image](https://hub.docker.com/_/maven/){:target="\_blank"} to compile the Spring Boot source code and create a JAR file. We also pass a parameter that changes the Maven cache location folder. The reason for this parameter is that the default Maven location is `/root/.m2` which is defined as a volume (and thus discarded after each build). 
+ +### Caching the Maven dependencies + +Codefresh [automatically caches]({{site.baseurl}}/docs/pipelines/pipeline-caching/) the workspace of a build (`/codefresh/volume`) for us. This works great for build tools that keep their cache in the project folder, but not for Maven/Gradle, which keep their cache externally. By changing the location of the Maven repo to a folder inside the project (the `m2_repository` name is arbitrary), we make sure that Codefresh automatically caches the Maven libraries, resulting in much faster builds. + +The next step is a Docker build. We name our image **spring-boot-2-sample-app** and tag it with the string `non-multi-stage`, but of course you can use any other tag name that you wish. + +{% include image.html +lightbox="true" +file="/images/learn-by-example/java/spring-boot-docker-image.png" +url="/images/learn-by-example/java/spring-boot-docker-image.png" +alt="Spring Boot Docker image" +caption="Spring Boot Docker image" +max-width="80%" +%} + +Once the pipeline is finished, you will see the Spring Boot 2 Docker image in your [Docker image dashboard]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#viewing-docker-images). + +The last step is similar to the unit tests, but this time we run integration tests. We again define a custom cache folder, so when you run the build you will see that Maven automatically picks up the cache from the previous step. All Codefresh steps in a pipeline [run on the same workspace]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps), so the build results from one step are visible to the next. + +>Notice that because the [Maven lifecycle](https://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html){:target="\_blank"} also executes the previous phases in a build, the `mvn verify` command essentially runs `mvn package` as well. In theory, we could have just the _Integration_ step in this pipeline on its own. That step would build the code and run unit and integration tests, all in one stage. For demonstration purposes, however, we include two steps so that you can see the correct usage of the Maven cache. + + +## Spring Boot 2 and Docker (multi-stage builds) + +Docker added [multi-stage builds](https://blog.docker.com/2017/07/multi-stage-builds/){:target="\_blank"} in version 17.05. With multi-stage builds, a Docker build can use one base image for compilation/packaging/unit tests and a different one that holds the runtime of the application. This makes the final image more secure and smaller in size (as it does not contain any development/debugging tools). + +In the case of Java, multi-stage builds allow the compilation itself to happen during the build process, even though the final Docker image does not contain a full JDK. + + +Here is the multi-stage build definition: + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM maven:3.5.2-jdk-8-alpine AS MAVEN_TOOL_CHAIN +COPY pom.xml /tmp/ +RUN mvn -B dependency:go-offline -f /tmp/pom.xml -s /usr/share/maven/ref/settings-docker.xml +COPY src /tmp/src/ +WORKDIR /tmp/ +RUN mvn -B -s /usr/share/maven/ref/settings-docker.xml package + +FROM java:8-jre-alpine + +EXPOSE 8080 + +RUN mkdir /app +COPY --from=MAVEN_TOOL_CHAIN /tmp/target/*.jar /app/spring-boot-application.jar + +ENTRYPOINT ["java","-Djava.security.egd=file:/dev/./urandom","-jar","/app/spring-boot-application.jar"] + +{% endraw %} +{% endhighlight %} + +This Docker build does the following: + +1. 
Starts from the standard Maven Docker image +1. Copies only the `pom.xml` file into the container +1. Runs an `mvn` command to download all dependencies found in the `pom.xml` +1. Copies the rest of the source code into the container +1. Compiles the code and runs unit tests (with `mvn package`) +1. Discards the Maven image with all the compiled classes/unit test results, etc. +1. Starts again from the JRE image and copies **only** the JAR file created before + +The order of the steps is tuned so that it takes advantage of the layer caching built into Docker. +If you change something in the source code, Docker already has a layer with the Maven dependencies, so they +are not downloaded again. Only if you change the `pom.xml` file itself will Docker start again from the lowest layer. + +Again, we define a custom location for the Maven cache (using the `settings-docker.xml` file). This way, the Maven dependencies are placed inside the container and are cached automatically with the respective layer (read more about this technique [in the official documentation](https://github.com/carlossg/docker-maven#packaging-a-local-repository-with-the-image){:target="\_blank"}). + +### Create a CI pipeline for Spring (multi-stage Docker builds) + +Because in multi-stage builds Docker itself handles most of the build process, moving the project to Codefresh is straightforward. We just need [a single step](https://github.com/codefresh-contrib/spring-boot-2-sample-app/blob/master/codefresh.yml){:target="\_blank"} that creates the Docker image after checking out the code. The integration test step is the same as before. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build + - 'integration test' +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + build_app_image: + title: Building Docker Image + type: build + stage: build + image_name: spring-boot-2-sample-app + working_directory: ./ + tag: 'multi-stage' + dockerfile: Dockerfile + run_integration_tests: + title: Integration test + stage: 'integration test' + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository verify -Dserver.host=http://my-spring-app + services: + composition: + my-spring-app: + image: '${{build_app_image}}' + ports: + - 8080 + readiness: + timeoutSeconds: 30 + periodSeconds: 15 + image: byrnedo/alpine-curl + commands: + - "curl http://my-spring-app:8080/" +{% endraw %} +{% endhighlight %} + +This will compile/test/package the Spring Boot application and create a Docker image. Codefresh automatically caches +Docker layers (it uses the Docker image of a previous build as a cache for the next), and therefore builds become +much faster after the first one finishes. 
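+
+If you also want to publish the resulting image to a registry, you could append a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) that references the build step. The snippet below is a minimal sketch; the registry integration name (`dockerhub`) is a placeholder for whatever registry you have connected to your Codefresh account:
+
+ `codefresh.yml` (excerpt)
+{% highlight yaml %}
+{% raw %}
+  push_app_image:
+    title: Pushing Docker image
+    type: push
+    stage: build
+    # reference the image produced by the build step above
+    candidate: '${{build_app_image}}'
+    tag: 'multi-stage'
+    # name of your registry integration in Codefresh (placeholder)
+    registry: dockerhub
+{% endraw %}
+{% endhighlight %}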
+ + +## Related articles +[Gradle example]({{site.baseurl}}/docs/example-catalog/ci-examples/gradle/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[How Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) diff --git a/_docs/example-catalog/ci-examples/uploading-or-downloading-from-gs.md b/_docs/example-catalog/ci-examples/uploading-or-downloading-from-gs.md new file mode 100644 index 00000000..1bfcf82d --- /dev/null +++ b/_docs/example-catalog/ci-examples/uploading-or-downloading-from-gs.md @@ -0,0 +1,152 @@ +--- +title: "Upload/Download files to/from Google Storage" +description: "Upload and download a JAR from Google Storage from within a pipeline" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +- A [Google Storage Bucket](https://cloud.google.com/storage/docs/creating-buckets){:target="\_blank"} with public read access +- A private key [downloaded](https://cloud.google.com/storage/docs/authentication#gsutilauth){:target="\_blank"} for the existing service account associated with your bucket (for this example, we base64 encoded the key for ease of use in a pipeline variable using `base64 key_file.json > key_file.b64`) + +## Example Project + +The example project is at [GitHub](https://github.com/codefresh-contrib/gcloud-storage-sample-app.git){:target="\_blank"}. The application is a simple Scala Hello World application contained in a jar, with a dependency on a scala-library jar which we will download from the bucket and package into a Docker image. + +Our project contains two pipelines, one to upload the dependency JAR _to_ our bucket, and the other to download the JAR _from_ the bucket. + +## Create the first pipeline + +The first pipeline contains one stage/step, to upload the JAR to the Google Storage Bucket. + +{% include image.html +lightbox="true" +file="/images/examples/gs/gs-upload-pipeline.png" +url="/images/examples/gs/gs-upload-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +You need to define a pipeline variable, KEY_FILE, in the pipeline settings: + +{% include image.html +lightbox="true" +file="/images/examples/gs/gs-pipeline-vars.png" +url="/images/examples/gs/gs-pipeline-vars.png" +alt="Codefresh UI Pipeline Variables" +caption="Codefresh UI Pipeline Variables" +max-width="70%" +%} + +Here is the first pipeline: + +`codefresh-upload.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - "upload" + +steps: + upload: + title: "Uploading library jar to GS..." + type: "freestyle" + stage: "upload" + arguments: + image: "google/cloud-sdk:slim" + commands: + - echo $KEY_FILE | base64 --decode > key_file.json + - gcloud auth activate-service-account --key-file=key_file.json + - curl https://repo1.maven.org/maven2/org/scala-lang/scala-library/2.12.2/scala-library-2.12.2.jar | gsutil cp - gs://anna-demo-bucket/scala-library-2.12.2.jar +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Uploads a JAR from Maven into our Google Storage bucket through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). 
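+
+The same approach works for any artifact that your build produces. The sketch below uploads a locally built file instead of streaming one from Maven Central, assuming an earlier step in the pipeline has already produced it; the file path and bucket name are placeholders for your own values:
+
+{% highlight yaml %}
+{% raw %}
+  upload_artifact:
+    title: "Uploading a build artifact to GS..."
+    type: "freestyle"
+    stage: "upload"
+    arguments:
+      image: "google/cloud-sdk:slim"
+      commands:
+        - echo $KEY_FILE | base64 --decode > key_file.json
+        - gcloud auth activate-service-account --key-file=key_file.json
+        # target/my-app.jar and my-example-bucket are placeholders - replace with your artifact and bucket
+        - gsutil cp target/my-app.jar gs://my-example-bucket/my-app.jar
+{% endraw %}
+{% endhighlight %}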
+ +## Create the second pipeline + +Our second pipeline has four stages: + +- A stage for cloning the repository +- A stage for downloading the jar from the bucket +- A stage for building the image +- A stage for pushing the image to the repository + +{% include image.html +lightbox="true" +file="/images/examples/gs/gs-download-pipeline.png" +url="/images/examples/gs/gs-download-pipeline.png" +alt="Codefresh UI Pipeline View" +caption="Codefresh UI Pipeline View" +max-width="90%" +%} + +Here is the YAML for the second pipeline: + +`codefresh-download.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +stages: + - "clone" + - "download" + - "build" + - "push" + +steps: + clone: + title: "Cloning main repository..." + type: "git-clone" + stage: "clone" + arguments: + repo: "codefresh-contrib/gcloud-storage-sample-app" + git: "github" + revision: "master" + download: + title: "Downloading dependency lib from GS..." + type: "freestyle" + stage: "download" + working_directory: ${{clone}} + arguments: + image: "google/cloud-sdk:slim" + commands: + - gsutil cp gs://anna-demo-bucket/scala-library-2.12.2.jar . + build: + title: "Building docker image..." + type: "build" + stage: "build" + working_directory: ${{clone}} + arguments: + image_name: "annabaker/gcloud-storage-sample-app" + tag: "master" + push_to_my_registry: + stage: "push" + type: "push" + title: "Pushing to external registry..." + arguments: + candidate: ${{build}} + tag: '1.0.0' + registry: "dockerhub" +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the source code through a [Git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/). +2. Downloads the dependency JAR from our publicly-accessible Google Storage bucket through a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). +3. Builds a docker image through a [build step]({{site.baseurl}}/docs/pipelines/steps/build/). +4. Pushes the Docker image to the DockerHub registry you have integrated with Codefresh through a [push step](https://codefresh.io/docs/docs/pipelines/steps/push/). + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) + + diff --git a/_docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline.md b/_docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline.md new file mode 100644 index 00000000..d02cee77 --- /dev/null +++ b/_docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline.md @@ -0,0 +1,116 @@ +--- +title: "Vault secrets in pipelines" +description: "Access and refer to Vault secrets in pipelines" +group: example-catalog +sub_group: ci-examples +toc: true +--- + +Codefresh offers a Vault plugin you may use from the [Step Marketplace](https://codefresh.io/steps/step/vault){:target="\_blank"}. The plugin imports key-value pairs from the Vault server, and exports them into the pipeline. 
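+
+In its simplest form, the step only needs the address of your Vault server, the path to the secret, and an authentication token. The sketch below imports the key-value pairs and then echoes one of them to confirm that it was exported; the address, path, token, and key name (`password`) are placeholders for your own values:
+
+```yaml
+version: "1.0"
+steps:
+  vault:
+    title: Importing vault values...
+    type: vault
+    arguments:
+      VAULT_ADDR: 'http://my-vault-host:8200'  # placeholder
+      VAULT_PATH: 'path/to/secret'             # placeholder
+      VAULT_AUTH_TOKEN: 'my-token'             # placeholder
+  print_secret:
+    title: Verifying the exported variable...
+    image: alpine
+    commands:
+      # for demonstration only - avoid echoing real secrets in build logs
+      - echo $password
+```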
+ +## Prerequisites + +- A [free Codefresh account](https://codefresh.io/docs/docs/getting-started/create-a-codefresh-account/) +- An existing Vault server [already set up](https://learn.hashicorp.com/vault/getting-started/install){:target="\_blank"} +- A secret stored in the Vault server with a key of `password` +- A Vault [authorization token](https://learn.hashicorp.com/vault/getting-started/authentication#tokens){:target="\_blank"} + +## Example Java application + +You can find the example project on [GitHub](https://github.com/codefresh-contrib/vault-sample-app){:target="\_blank"}. + +The example application retrieves the system variable `password` from the pipeline, and uses it to authenticate to a Redis database, but you are free to use any type of database of your choosing. + +```java + String password = System.getenv("password"); + String host = System.getProperty("server.host"); + + RedisClient redisClient = new RedisClient( + RedisURI.create("redis://" + password + "@" + host + ":6379")); + RedisConnection connection = redisClient.connect(); +``` + +Also in the example application is a simple unit test that ensures we are able to read and write data to the database. + +You cannot run the application locally, as it needs to run in the pipeline in order to use our environment variables to connect. + +## Create the pipeline + +The following pipeline contains three steps: a vault step, a [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step, and a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). + +{% include image.html +lightbox="true" +file="/images/examples/secrets/vault-pipeline.png" +url="/images/examples/secrets/vault-pipeline.png" +alt="Vault pipeline" +caption="Vault Pipeline" +max-width="100%" +%} + +You should be able to copy and paste this YAML into the in-line editor in the Codefresh UI. It will automatically clone the project for you. + +Note that you need to change the `VAULT_ADDR`, `VAULT_PATH`, and `VAULT_AUTH_TOKEN` arguments within the first step to your respective values. + +`codefresh.yml` +```yaml +version: "1.0" +stages: + - "vault" + - "clone" + - "package" +steps: + vault: + title: Importing vault values... + stage: "vault" + type: vault + arguments: + VAULT_ADDR: 'http://:' + VAULT_PATH: 'path/to/secret' + VAULT_AUTH_TOKEN: '' + clone: + title: Cloning main repository... + type: git-clone + arguments: + repo: 'codefresh-contrib/vault-sample-app' + git: github + stage: clone + package_jar: + title: Packaging jar and running unit tests... + stage: package + working_directory: ${{clone}} + arguments: + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository -Dserver.host=my-redis-db-host clean package + services: + composition: + my-redis-db-host: + image: 'redis:4-alpine' + command: 'redis-server --requirepass $password' + ports: + - 6379 +``` + +The pipeline does the following: + +1. Imports the key-value pairs from the Vault server and exports them into the pipeline under `/meta/env_vars_to_export`. +2. Clones the main repository through the `clone` step. The freestyle step that follows sets its `working_directory` to this clone, which ensures that its commands are run [inside the project that was checked out]({{site.baseurl}}/docs/pipelines/steps/git-clone/#basic-clone-step-project-based-pipeline). +3. 
The `package_jar` step does a few special things to take note of: + - Spins up a [Service Container]({{site.baseurl}}/docs/pipelines/service-containers/) running Redis on port 6379, and sets the password to the database using our exported environment variable + - Sets `maven.repo.local` to cache Maven dependencies into the local codefresh volume to [speed up builds]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/#caching-the-maven-dependencies) + - Runs unit tests and packages the jar. Note how you can directly refer to the service container's name (`my-redis-db-host`) when we set `server.host` + +You can verify that the variable was correctly exported to the pipeline by running a simple `echo` command: + {% include image.html + lightbox="true" + file="/images/examples/secrets/vault-pipeline2.png" + url="/images/examples/secrets/vault-pipeline2.png" + alt="Vault pipeline variable" + caption="Vault pipeline variable" + max-width="100%" + %} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) +[Steps in pipelines]({{site.baseurl}}/docs/pipelines/steps/) + diff --git a/_docs/example-catalog/ci-examples/voting-app.md b/_docs/example-catalog/ci-examples/voting-app.md new file mode 100644 index 00000000..08cb4a5c --- /dev/null +++ b/_docs/example-catalog/ci-examples/voting-app.md @@ -0,0 +1,93 @@ +--- +title: "Voting app" +description: "" +excerpt: "" +group: example-catalog +sub_group: ci-examples +redirect_from: + - /docs/voting-app-1/ + - /docs/python/voting-app/ +toc: true +--- +This voting application is a demo with which you can build an advanced composition that uses Python, Redis, Postgres, Node.js, and .NET. + +## Looking around +In the root of this repository you'll find a file named `codefresh.yml`; this is our build descriptor, and it describes the different steps that comprise our process. 
Let's quickly review the contents of this file: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + unit-tests: + image: codefresh/buildpacks:nodejs-5 + working-directory : ${{initial-clone}} + commands: + - echo Installing npm modules silent + - npm install + - gulp test + - echo $(date) + + build-step: + #title: Build My Image #Display name for the step + type: build + image-name: containers101/cf-example-result + tag: ${{CF_BRANCH}} + build_arguments: + - OPTION_A=${{OPTION_A}} + - OPTION_B=${{OPTION_B}} + + push-to-registry: + type: push + #candidate: the image from the build step + candidate: ${{build-step}} + tag: ${{CF_BRANCH}} + + integration-tests-step: + type: composition + #location of the compostion on the filesystem of the cloned image + composition: './cf-compositions/voting-app-full.yml' + #run integration only when pushing to master + when: + branch: + only: + - master #can also be regex + composition-candidates: + #this will be the image that we will test + integ-test: + image: containers101/cf-example-tests:master + command: ./tests.sh + composition-variables: + - VOTING_OPTION_A=${{OPTION_A}} + - VOTING_OPTION_B=${{OPTION_B}} + + launch-composition: + type: launch-composition + environment-name: 'Test composition after build' + composition: './cf-compositions/voting-app-full.yml' + composition-variables: + - VOTING_OPTION_A=${{OPTION_A}} + - VOTING_OPTION_B=${{OPTION_B}} + + deploy to ecs: + image: codefresh/cf-deploy-ecs + commands: + - cfecs-update --image-name containers101/cf-example-result --image-tag ${{CF_BRANCH}} eu-west-1 vote-app result + environment: + - AWS_ACCESS_KEY_ID=${{AWS_ACCESS_KEY_ID}} + - AWS_SECRET_ACCESS_KEY=${{AWS_SECRET_ACCESS_KEY}} + when: + condition: + all: + pushCommit: 'includes(lower("${{CF_COMMIT_MESSAGE}}"), "[deploy]") == true' +{% endraw %} +{% endhighlight %} + +{{site.data.callout.callout_info}} +##### Example + +Just head over to the example [__repository__](https://github.com/containers101/cf-example-result){:target="_blank"} in GitHub and follow the instructions there. +{{site.data.callout.end}} + +## Related articles +[CI/CD pipeline examples]({{site.baseurl}}/docs/example-catalog/ci-examples/) diff --git a/_docs/example-catalog/examples.md b/_docs/example-catalog/examples.md new file mode 100644 index 00000000..9dee4525 --- /dev/null +++ b/_docs/example-catalog/examples.md @@ -0,0 +1,127 @@ +--- +title: "CI/CD pipeline examples" +description: "A collection of examples for Codefresh pipelines" +group: example-catalog +redirect_from: + - /docs/examples-v01/ + - examples.html + - /docs/catalog-examples/ + - /docs/examples/ + - /docs/pipelines-examples/ + - /docs/pipelines/pipelines-examples/ +toc: true +--- +Codefresh enables you to define the steps of your pipeline in a [YAML file]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/). By default, the file is named `codefresh.yml`, and is located in the root directory of the repository. + +## CI examples + +### Programming-language specific examples + +Codefresh is agnostic as far as programming languages are concerned. All major programming languages are supported: + +- [Go Web App]({{site.baseurl}}/docs/example-catalog/ci-examples/golang-hello-world/) or [Go CLI]({{site.baseurl}}/docs/example-catalog/golang/goreleaser) +- [Spring Java app with Maven]({{site.baseurl}}/docs/example-catalog/ci-examples/spring-boot-2/) or [Gradle]({{site.baseurl}}/docs/example-catalog/ci-examples/gradle/). 
Also how to [upload JAR to Nexus/Artifactory]({{site.baseurl}}/docs/example-catalog/ci-examples/publish-jar/) +- Node [Express.js App]({{site.baseurl}}/docs/example-catalog/ci-examples/lets-chat/) or [React.js App]({{site.baseurl}}/docs/example-catalog/ci-examples/react/) +- [Php App]({{site.baseurl}}/docs/example-catalog/ci-examples/php) +- [Python Django App]({{site.baseurl}}/docs/example-catalog/ci-examples/django/) +- [Ruby On Rails App]({{site.baseurl}}/docs/example-catalog/ci-examples/ruby) +- [C]({{site.baseurl}}/docs/example-catalog/ci-examples/c-make/) or [C++]({{site.baseurl}}/docs/example-catalog/ci-examples/cpp-cmake) +- [Rust]({{site.baseurl}}/docs/example-catalog/ci-examples/rust/) +- [C# .NET core]({{site.baseurl}}/docs/example-catalog/ci-examples/dotnet/) +- [Scala App]({{site.baseurl}}/docs/example-catalog/ci-examples/scala-hello-world/) +- [Android (Mobile)]({{site.baseurl}}/docs/example-catalog/ci-examples/android/) + +### Source code checkout examples + +You can check out code from one or more repositories in any pipeline phase. Codefresh includes [built-in GIT integration]({{site.baseurl}}/docs/integrations/git-providers/) with all the popular GIT providers and can be used with [git-clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) steps. + +- [Cloning Git repositories using the built-in integration]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout/) +- [Cloning Git repositories using manual Git commands]({{site.baseurl}}/docs/example-catalog/ci-examples/git-checkout-custom/) +- [Checking out from Subversion, Perforce, Mercurial, etc ]({{site.baseurl}}/docs/example-catalog/ci-examples/non-git-checkout/) + +### Build/push examples + +Codefresh has native support for [building]({{site.baseurl}}/docs/pipelines/steps/build/) and [pushing]({{site.baseurl}}/docs/pipelines/steps/push/) Docker containers. +You can also compile traditional applications that are not Dockerized yet. 
+ +- [Build an Image with the Dockerfile in root directory]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-dockerfile-in-root-directory/) +- [Build an Image by specifying the Dockerfile location]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-specify-dockerfile-location) +- [Build an Image from a different Git repository]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-from-a-different-git-repository) +- [Build and Push an Image]({{site.baseurl}}/docs/example-catalog/ci-examples/build-and-push-an-image) +- [Build an Image with build arguments]({{site.baseurl}}/docs/example-catalog/ci-examples/build-an-image-with-build-arguments) +- [Share data between steps]({{site.baseurl}}/docs/example-catalog/ci-examples/shared-volumes-between-builds) +- [Upload or download from a Google Storage Bucket]({{site.baseurl}}/docs/example-catalog/ci-examples/uploading-or-downloading-from-gs/) +- [Get Short SHA ID and use it in a CI process]({{site.baseurl}}/docs/example-catalog/ci-examples/get-short-sha-id-and-use-it-in-a-ci-process) +- [Call a CD pipeline from a CI pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/call-child-pipelines) +- [Trigger a Kubernetes Deployment from a Dockerhub Push Event]({{site.baseurl}}/docs/example-catalog/ci-examples/trigger-a-k8s-deployment-from-docker-registry/) + + +### Unit and integration test examples + +Codefresh has support for both [unit]({{site.baseurl}}/docs/testing/unit-tests/) and [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) as well as [test reporting]({{site.baseurl}}/docs/testing/test-reports/). + +- [Run unit tests]({{site.baseurl}}/docs/example-catalog/ci-examples/run-unit-tests) +- [Run integration tests]({{site.baseurl}}/docs/example-catalog/ci-examples/run-integration-tests/) +- [Run integration tests with MongoDB]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mongo/) +- [Run integration tests with MySQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-mysql/) +- [Run integration tests with PostgreSQL]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-postgres/) +- [Run integration tests with Redis]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-redis/) +- [Populate a database with existing data]({{site.baseurl}}/docs/example-catalog/populate-a-database-with-existing-data) + +- [Shared volumes of service from composition step for other yml steps]({{site.baseurl}}/docs/example-catalog/shared-volumes-of-service-from-composition-step-for-other-yml-steps) +- [Launch Composition]({{site.baseurl}}/docs/example-catalog/ci-examples/launch-composition) +- [Launch Composition and define Service Environment variables using a file]({{site.baseurl}}/docs/example-catalog/ci-examples/launching-a-composition-and-defining-a-service-environment-variables-using-a-file) +- [Run multiple kinds of unit tests using fan-in-fan-out parallel pipeline]({{site.baseurl}}/docs/example-catalog/fan-in-fan-out) + +### Code coverage examples + +- [Run coverage reports with Codecov]({{site.baseurl}}/docs/example-catalog/ci-examples/codecov-testing) +- [Run coverage reports with Coveralls]({{site.baseurl}}/docs/example-catalog/ci-examples/coveralls-testing) +- [Run coverage reports with Codacy]({{site.baseurl}}/docs/example-catalog/ci-examples/codacy-testing) + +### Secrets examples + +Codefresh can automatically export secret key-value pairs using the Vault plugin from the [Step 
Marketplace](https://codefresh.io/steps/step/vault). + +- [Vault secrets in the Pipeline]({{site.baseurl}}/docs/example-catalog/ci-examples/vault-secrets-in-the-pipeline) +- [Decryption with Mozilla SOPS]({{site.baseurl}}/docs/example-catalog/ci-examples/decryption-with-mozilla-sops) +- [GitOps with Bitnami sealed secrets]({{site.baseurl}}/docs/example-catalog/ci-examples/gitops-secrets) + +### Notification examples + +- [Send notification to Slack]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-slack) +- [Send notification to Jira]({{site.baseurl}}/docs/example-catalog/ci-examples/sending-the-notification-to-jira) + + +## CD examples + +### Preview environment examples + +Codefresh can automatically launch environments (powered by Docker swarm) to [preview a Pull Request or feature]({{site.baseurl}}/docs/getting-started/on-demand-environments/). The definition of the environment can come from an [existing composition]({{site.baseurl}}/docs/testing/create-composition/), a docker-compose file, or an inline YAML. Preview environments can be launched manually or [automatically from pipelines]({{site.baseurl}}/docs/pipelines/steps/launch-composition/). + +- [MongoDB preload data]({{site.baseurl}}/docs/example-catalog/cd-examples/import-data-to-mongodb/) +- [NodeJS + Angular2 + MongoDB]({{site.baseurl}}/docs/example-catalog/cd-examples/nodejs-angular2-mongodb/) +- [NGINX Basic Auth]({{site.baseurl}}/docs/example-catalog/cd-examples/secure-a-docker-container-using-http-basic-auth/) +- [Spring Boot + Kafka + Zookeeper]({{site.baseurl}}/docs/example-catalog/cd-examples/spring-boot-kafka-zookeeper/) +- [Web terminal]({{site.baseurl}}/docs/example-catalog/cd-examples/web-terminal/) + +### Deployment examples + +Codefresh can deploy to any platform, such as VMs, FTP/SSH/S3 sites, and app servers, but of course it has great support for [Kubernetes clusters]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) and [Helm releases]({{site.baseurl}}/docs/new-helm/helm-releases-management/): + +- [Deploy to a VM with packer]({{site.baseurl}}/docs/example-catalog/cd-examples/packer-gcloud/) +- [Deploy to a VM with FTP]({{site.baseurl}}/docs/example-catalog/cd-examples/transferring-php-ftp) +- [Deploy to Tomcat using SCP]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-to-tomcat-via-scp) +- [Deploy Demochat to a Kubernetes cluster]({{site.baseurl}}/docs/cd-examples/deploy-to-kubernetes/codefresh-kubernetes-integration-demochat-example/) +- [Use kubectl as part of freestyle step]({{site.baseurl}}/docs/example-catalog/cd-examples/use-kubectl-as-part-of-freestyle-step) +- [Deploy with Kustomize]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-with-kustomize) +- [Deploy with Helm]({{site.baseurl}}/docs/example-catalog/cd-examples/helm) +- [Deploy with Terraform]({{site.baseurl}}/docs/example-catalog/cd-examples/terraform) +- [Deploy with Pulumi]({{site.baseurl}}/docs/example-catalog/cd-examples/pulumi) +- [Deploy to Nomad]({{site.baseurl}}/docs/example-catalog/cd-examples/nomad) +- [Deploy to Heroku]({{site.baseurl}}/docs/example-catalog/cd-examples/deploy-to-heroku/) +- [Deploy to Docker swarm]({{site.baseurl}}/docs/example-catalog/cd-examples/docker-swarm/) +- [Deploy to Elastic Beanstalk]({{site.baseurl}}/docs/example-catalog/cd-examples/elastic-beanstalk/) +- [Deploy to Amazon ECS/Fargate]({{site.baseurl}}/docs/example-catalog/cd-examples/amazon-ecs/) + + diff --git a/_docs/example-catalog/gitops-example.md 
b/_docs/example-catalog/gitops-example.md new file mode 100644 index 00000000..ba1c727d --- /dev/null +++ b/_docs/example-catalog/gitops-example.md @@ -0,0 +1,9 @@ +--- +title: "GitOps examples" +description: "A collection of examples for GitOps deployments" +group: example-catalog +sub_group: gitops-examples +toc: true +--- + +TBD \ No newline at end of file diff --git a/_docs/getting-started/architecture.md b/_docs/getting-started/architecture.md deleted file mode 100644 index 584fd507..00000000 --- a/_docs/getting-started/architecture.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: "Architecture" -description: "" -group: getting-started -toc: true ---- - -Codefresh GitOps is built around an enterprise version of the Argo Ecosystem, fully compliant with the GitOps paradigm, with industry-standard security. -To cater to differing requirements and degrees of enterprise security, Codefresh supports hosted and hybrid installation environments for Codefresh runtimes. - -The sections that follow illustrate the architecture of the different installation environments, starting with a high-level overview of the Codefresh Platform. - -### Codefresh architecture - -The diagram shows a high-level view of the Codefresh Platform and its core components, the Codefresh Control Plane, the Codefresh Runtime, and the Codefresh Clients. - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-codefresh-simple.png" - url="/images/getting-started/architecture/arch-codefresh-simple.png" - alt="Codefresh Platform architecture" - caption="Codefresh Platform architecture" - max-width="100%" -%} - -{::nomarkdown} -
      -{:/} - -#### Codefresh Control Plane -The Codefresh Control Plane is the SaaS component in the platform. External to the enterprise firewall, it does not have direct communication with the Codefresh Runtime, Codefresh Clients, or the customer's organizational systems. The Codefresh Runtime and the Codefresh Clients communicate with the Codefresh Control Plane to retrieve the required information. - - -{::nomarkdown} -
      -{:/} - -#### Codefresh Runtime -The Codefresh Runtime is installed on a Kubernetes cluster, and houses the enterprise distribution of the Codefresh Application Proxy and the Argo Project. -Depending on the type of installation environment, the Codefresh Runtime is installed either in the Codefresh platform (hosted), or in the customer environment (hybrid). Read more in [Codefresh runtime architecture](#codefresh-runtime-architecture). - - -{::nomarkdown} -
      -{:/} - -#### Codefresh Clients - -Codefresh Clients include the Codefresh UI and the Codefresh CLI. -The Codefresh UI provides a unified, enterprise-wide view of deployments (runtimes and clusters), and CI/CD operations (Delivery Pipelines, workflows, and deployments) in the same location. -The Codefresh CLI includes commands to install hybrid runtimes, add external clusters, and manage runtimes and clusters. - -### Codefresh runtime architecture -The sections that follow show detailed views of runtime architecture in the different installation environments, and descriptions of the Codefresh Runtime components. - -* [Hosted GitOps runtime architecture](#hosted-gitops-runtime-architecture) - In this installation environment, the Codefresh Runtime is installed on a _Codefresh-managed cluster_ in the Codefresh platform. -* Hybrid runtime architecture: - In this installation environment, the Codefresh Runtime is installed on a _customer-managed cluster_ in the customer environment. The Codefresh Runtime with or without ingress controllers: - * [Ingress controller](#ingress-controller-hybrid-runtime-architecture) - * [Ingress-less](#ingress-less-hybrid-runtime-architecture) -* Runtime components - * [Codefresh Application Proxy](#codefresh-application-proxy) - * [Argo Project](#argo-project) - * [Request Routing Service](#request-routing-service) - * [Tunnel Server](#codefresh-tunnel-server) - * [Tunnel Client](#codefresh-tunnel-client) - - -#### Hosted GitOps runtime architecture -In the hosted environment, the Codefresh Runtime is installed on a K8s cluster managed by Codefresh. - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-hosted.png" - url="/images/getting-started/architecture/arch-hosted.png" - alt="Hosted runtime architecture" - caption="Hosted runtime architecture" - max-width="100%" -%} - -#### Ingress controller hybrid runtime architecture -Runtimes with ingress use an ingress controller to control communication between the Codefresh Runtime in the customer cluster and the Codefresh Platform. Ingress controllers are optimal when the cluster with the Codefresh Runtime is exposed to the internet. - - - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-hybrid-ingress.png" - url="/images/getting-started/architecture/arch-hybrid-ingress.png" - alt="Ingress-based hybrid runtime architecture" - caption="Ingress-based hybrid runtime architecture" - max-width="100%" -%} - -#### Ingress-less hybrid runtime architecture -Ingress-less runtimes uses tunneling to control communication between the Codefresh Runtime in the customer cluster and the Codefresh Platform. Ingress-less runtimes are optimal when the cluster with the Codefresh Runtime is not exposed to the internet. - -{% include - image.html - lightbox="true" - file="/images/getting-started/architecture/arch-hybrid-ingressless.png" - url="/images/getting-started/architecture/arch-hybrid-ingressless.png" - alt="Ingress-less hybrid runtime architecture" - caption="Ingress-less hybrid runtime architecture" - max-width="100%" -%} - - - -#### Codefresh Application Proxy -The Codefresh Application Proxy (App-Proxy) functions as the Codefresh agent, and is deployed as a service in the Codefresh Runtime. -For hybrid runtimes with ingress, the App-Proxy is the single point-of-contact between the Codefresh Runtime, and the Codefresh Clients, the Codefresh Platform, and any organizational systems in the customer environment. 
-For ingress-less hybrid runtimes, the Tunnel Client forwards the incoming traffic from the Tunnel Server using internal reverse proxy to the App-Proxy. - -The App-Proxy: -* Accepts and serves requests from Codefresh Clients either via the Codefresh UI or CLI -* Retrieves a list of Git repositories for visualization in Codefresh Clients -* Retrieves permissions from the Codefresh Control Plane to authenticate and authorize users for the required operations. -* Implements commits for GitOps-controlled entities, such as Delivery Pipelines and other CI resources -* Implements state-change operations for non-GitOps controlled entities, such as terminating Argo Workflows - -{::nomarkdown} -
      -{:/} - -#### Argo Project - -The Argo Project includes: -* Argo CD for declarative continuous deployment -* Argo Rollouts for progressive delivery -* Argo Workflows as the workflow engine -* Argo Events for event-driven workflow automation framework - - -{::nomarkdown} -

      -{:/} - -#### Request Routing Service -The Request Routing Service is installed on the same cluster as the Codefresh Runtime in the customer environment. -It receives requests from the ingress controller (ingress) or the Tunnel Client (ingress-less), and forwards the request URLs to the Application Proxy, and webhooks directly to the Event Sources. - ->Important: - The Request Routing Service is available from runtime version 0.0.543 and higher. - Older runtime versions are not affected as there is complete backward compatibility, and the ingress controller continues to route incoming requests. - -#### Tunnel Server -Applies only to _ingress-less_ runtimes in hybrid installation environments. -The Codefresh Tunnel Server is installed in the Codefresh platform. It communicates with the enterprise cluster located behind a NAT or firewall. - -The Tunnel Server: -* Forwards traffic from Codefresh Clients to the client (customer) cluster. -* Manages the lifecycle of the Codefresh Tunnel Client. -* Authenticates requests from the Codefresh Tunnel Client to open tunneling connections. - -{::nomarkdown} -
      -{:/} - -#### Tunnel Client -Applies only to _ingress-less_ runtimes in hybrid installation environments. - -Installed on the same cluster as the Codefresh Runtime, the Codefresh Tunnel Client establishes the tunneling connection to the Codefresh Tunnel Server via the WebSocket Secure (WSS) protocol. -A single Codefresh Runtime can have a single Tunnel Client. - -The Codefresh Tunnel Client: -* Initiates the connection with the Codefresh Tunnel Server. -* Forwards the incoming traffic from the Tunnel Server through the Request Routing Service to App-Proxy, and other services. - -{::nomarkdown} -
      -{:/} - - -### Customer environment -The customer environment that communicates with the Codefresh Runtime and the Codefresh Platform, generally includes: -* Ingress controller for ingress hybrid runtimes - The ingress controller is configured on the same Kubernetes cluster as the Codefresh Runtime, and implements the ingress traffic rules for the Codefresh Runtime. - See [Ingress controller requirements]({{site.baseurl}}/docs/runtime/requirements/#ingress-controller). -* Managed clusters - Managed clusters are external clusters registered to provisioned hosted or hybrid runtimes for application deployment. - Hosted runtimes requires you to connect at least one external K8s cluster as part of setting up the Hosted GitOps environment. - Hybrid runtimes allow you to add external clusters after provisioning the runtimes. - See [Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/). -* Organizational systems - Organizational Systems include the customer's tracking, monitoring, notification, container registries, Git providers, and other systems. They can be entirely on-premises or in the public cloud. - Either the ingress controller (ingress hybrid environments), or the Tunnel Client (ingress-less hybrid environments), forwards incoming events to the Codefresh Application Proxy. - -### Related articles -[Set up a hosted runtime environment]({{site.baseurl}}/docs/runtime/hosted-runtime/) -[Install a hybrid runtime]({{site.baseurl}}/docs/runtime/installation/) - - - - diff --git a/_docs/getting-started/csdp-introduction.md b/_docs/getting-started/csdp-introduction.md deleted file mode 100644 index 6d48105f..00000000 --- a/_docs/getting-started/csdp-introduction.md +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: "Introducing Codefresh" -description: "" -group: getting-started -toc: true ---- - -Codefresh is a full-featured, turn-key solution for application deployments and releases. Powered by Argo, Codefresh uses Argo CD, Argo Workflows, Argo Events, and Argo Rollouts, extended with unique functionality and features essential for enterprise deployments. - -Codefresh offers security, maintainability, traceability, and most importantly, a single control plane for all stakeholders, be they developers, operators, product owners or project managers. - -With Codefresh, you can: - -* Deliver software at scale by managing hundreds or thousands of deployment targets and applications -* Get a secure, enterprise-ready distribution of Argo with built-in identity, RBAC (role-based access control), and secrets -* Gain clear visibility across all deployments and trace changes and regressions from code to cloud in seconds -* Get enterprise-level dedicated support for Argo deployments -* Get insights into every aspect of your CI/CD with smart dashboards -* Manage multiple runtimes and multiple clusters in a single pane of glass - - -### Codefresh deployment models - -Codefresh supports hosted and hybrid deployments: - -* **Hosted** deployment or Hosted GitOps, a hosted and managed version of Argo CD. The SaaS version of Codefresh, the runtime is hosted on a Codefresh cluster (easy setup) and managed by Codefresh (zero maintenance overhead). -Click once to provision the hosted runtime, and start deploying applications to clusters without having to install and maintain Argo CD. - - -* **Hybrid** deployment, with the runtime hosted on the customer cluster and managed by the customer. 
-The hybrid offering retains runtimes within the customer infrastructure while giving you the power of Argo CD with Codefresh's CI and CD tools, to help achieve continuous integration and continuous delivery goals. - -For details, see [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture). - - -### Codefresh and open source Argo -Codefresh brings the power of the Argo project to your Kubernetes deployments: - -* Argo CD for declarative continuous deployment -* Argo Rollouts for progressive delivery -* Argo Workflows as the workflow engine -* Argo Events for event-driven workflow automation framework - -Codefresh creates a conformed fork of the Argo project, providing an enterprise-supported version of the same, enhanced with unique functionality. - - - -### Codefresh and GitOps -Codefresh is GitOps-centric, and supports GitOps from the ground up. Codefresh leverages Argo components to have the entire desired state applied from Git to your Kubernetes cluster, and then reported back to Codefresh. -In addition: - -* Every state change operation in Codefresh is made via Git -* Codefresh audit log is derived from the Git changelog -* Codefresh access control is derived from Git permissions - -For details, see [entity model]({{site.baseurl}}/docs/getting-started/entity-model) and [access control]({{site.baseurl}}/docs/administration/access-control). - - -### Insights in Codefresh -Codefresh makes it easy to both access and visualize critical information for any CI/CD resource at any stage, at any level, and for anyone, from managers to DevOps engineers. - -{::nomarkdown} -
      - {:/} - -#### Global deployment analytics - -The Home dashboard presents system-wide highlights in real-time, making it an ideal tool for management. -Get insights into important KPIs for entities across runtimes and clusters, in the same location. View status of runtimes and managed clusters, deployments, failed deployments with rollbacks, most active applications, and Delivery Pipelines. - -{% include - image.html - lightbox="true" - file="/images/incubation/home-dashboard.png" - url="/images/incubation/home-dashboard.png" - alt="Global deployment analytics" - caption="Global deployment analytics" - max-width="70%" -%} - -{::nomarkdown} -
      - {:/} - -#### DORA metrics - -DORA metrics has become integral to enterprises wanting to quantify DevOps performance, and Codefresh has out-of-the-box support for it. - - -Apart from the metrics themselves, the DORA dashboard in Codefresh has several features such as the Totals bar with key metrics, filters that allow you to pinpoint just which applications or runtimes are contributing to problematic metrics, and the ability to set a different view granularity for each DORA metric. - -See [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/). - -{% include - image.html - lightbox="true" - file="/images/incubation/intro-dora-metrics.png" - url="/images/incubation/intro-dora-metrics.png" - alt="DORA metrics" - caption="DORA metrics" - max-width="60%" -%} - -{::nomarkdown} -
      - {:/} - -#### Application analytics and analysis - -The Applications dashboard displays a unified view of applications across runtimes and clusters. No matter what the volume and frequency of your deployments, the Applications dashboard makes it easy to track them. Search for Jira issues, commit messages, committers, and see exactly when and if the change was applied to a specific application. - -See [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/). - -{::nomarkdown} -
      - {:/} - -#### Delivery Pipelines -The Delivery Pipelines dashboard displays aggregated performance analytics based on the pipeline’s workflows, including step analytics across all the workflows in the pipeline. - -{::nomarkdown} -
      - {:/} - -#### Workflows -View and monitor submitted workflows across all pipelines in the Workflows dashboard. Select a time range, or view up to fifty of the most recent workflows for all the pipelines in the runtime. Drill down to any workflow for further analysis. -{::nomarkdown} -
      - {:/} - -### CI/CD resources in Codefresh -Wizards make it easy to create delivery pipelines and applications. Smart views and options make it easier to monitor and manage them. -{::nomarkdown} -

      - {:/} - -#### Delivery Pipelines - -Delivery Pipelines are where the CI magic happens in Codefresh. Our pipeline creation wizard removes the complexity from creating, validating, and maintaining pipelines. Every stage has multi-layered views of all the related Git change information for the pipeline. -See [Create delivery pipelines]({{site.baseurl}}/docs/pipelines/create-pipeline/). - -{::nomarkdown} -
      - {:/} - -#### Workflows -Drill down into a workflow to visualize the connections between the steps in the workflow. -A unique feature is the incorporation of Argo Events into the workflow visualization. You get a unified view of Argo Events and Argo Workflows in the same location, the events that triggered the workflow combined with the workflow itself. - -{::nomarkdown} -
      - {:/} - -#### Workflow Templates -Select from ready-to-use Workflow Templates in the Codefresh Hub for Argo or create your own custom template. The **Run** option allows you to test a new Workflow Template, or changes to an existing template, without needing to first commit the changes. - - {% include - image.html - lightbox="true" - file="/images/whats-new/wrkflow-template-main.png" - url="/images/whats-new/wrkflow-template-main.png" - alt="Workflow Templates" - caption="Workflow Templates" - max-width="70%" - %} - -{::nomarkdown} -
      - {:/} - -#### Applications -Create GitOps-compliant applications, and manage the application lifecycle in the Codefresh UI. - -Define all application settings in a single location through the intuitive Form mode or directly in YAML, and commit all changes to Git. -For easy access, after commit, the configuration settings are available in the Applications dashboard along with the deployment and resource information. - -See [Applications]({{site.baseurl}}/docs/deployment/create-application/). - -{% include - image.html - lightbox="true" - file="/images/applications/add-app-general-settings.png" - url="/images/applications/add-app-general-settings.png" - alt="Application creation in Codefresh" - caption="Application creation in Codefresh" - max-width="60%" -%} - -### GitOps CI integrations - -If you have Hosted GitOps, and your own CI tools for pipelines and workflows, enrich your deployments with CI information without disrupting existing processes. -Simply connect your CI tools to Codefresh, and our new report image template retrieves the information. For example, add the report image step in your GitHub Actions pipeline and reference the different integrations for Codefresh to retrieve and enrich the image with Jira ticket information. - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -{% include - image.html - lightbox="true" - file="/images/incubation/github-action-int-settings.png" - url="/images/incubation/github-action-int-settings.png" - alt="Image enrichment with GitHub Actions integration" - caption="Image enrichment with GitHub Actions integration" - max-width="60%" -%} - - -### What to read next -[Quick start tutorials]({{site.baseurl}}/docs/getting-started/quick-start) \ No newline at end of file diff --git a/_docs/getting-started/entity-model.md b/_docs/getting-started/entity-model.md deleted file mode 100644 index 302940c1..00000000 --- a/_docs/getting-started/entity-model.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: "Entity model" -description: "" -group: getting-started -toc: true ---- - -The Codefresh entity model is derived from these entity types: -* Codefresh account/user management entities -* Argo ecosystem entities -* Workflow, runtime, and Git Source entities -* Codefresh-specific entities such as pipelines, images, and applications - - - -### Codefresh account/user management entities -The account/user management entity types includes entities that do not share a direct relationship to the Codefresh domain. These are enterprise-specific entities in standard SAAS solutions. - -#### Account -Every user who signs in to Codefresh gets a private administrator user account. - -If you received an invitation to Codefresh, instead of a private user account, you are added as a collaborator to the main account. Your permissions are based on those explicitly assigned to you. - -The number of collaborators in an account is defined by the current plan associated with it. - -#### User -A user in Codefresh is one who has completed the sign-up process, and can log in using authorized third-party systems such as: -* GitHub -* Bitbucket -* GitLab -* Azure -* Google - -> If you configure SSO (Single Sign-On) for the account, the user can log in using only the configured SSO. - -#### Billing -For details, please contact [Sales](mailto:sales@codefresh.io?subject=[Codefresh] Codefresh billing inquiry). - -#### Single Sign-On (SSO) -Enterprise accounts can configure SSO. 
For details, see [Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/administration/single-sign-on/). - -#### Security configuration -Security settings include: -* Inactivity timeout per collaborator account -* Domain restriction for invitations - -### Argo ecosystem entities -Codefresh is built on top of the successful open source Argo project, and as such, supports all the native Argo project-entities. -You can apply every supported entity that exists in the open source projects to your Codefresh account. - -### Workflow -Codefresh shows all the workflows executed with Argo Workflows. -Workflows with pipelines display links to the pipelines. Users can terminate or retry a workflow, and view its logs. - -### Runtime -A runtime represents an installation of Codefresh on the customer's K8s cluster, and contains all the components required to perform all tasks on the cluster. - -Review [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/), and [runtime installation ]({{site.baseurl}}/docs/runtime/installation/). - -### Git Source -A Git Source is a link to a Git repository that stores GitOps-controlled entities. You can create as many as Git Sources as you require. - -To understand how to control Git Sources using GitOps, see [access control]({{site.baseurl}}/docs/administration/access-control/). - -### Codefresh high-level entities -Codefresh creates high-level views that better represents, abstracts, and connects all the different entities in the Argo ecosystem. - -#### CI/CD pipeline -A pipeline is a Codefresh-representation of Argo Events, comprising an Argo Events Sensor and Argo Events Triggers. Every trigger in a sensor becomes a different pipeline in Codefresh. The same sensor can be associated with multiple pipelines, if it has different trigger conditions. - -A pipeline links to the following Argo Events entities: -* Sensor -* Event Source -* Workflow Template (or a cluster-level Workflow Template) - -A pipeline also shows all the workflows created from the triggered event associated with that pipeline. - -#### Image -An image represents a built artifact of a Docker image, reported to Codefresh using a dedicated interface. - -Users can use a predefined [Argo Workflow Template](https://codefresh.io/argohub/workflow-template/codefresh-csdp) to help with transferring the image information to Codefresh. - -#### Application -A holistic view of all your Argo CD and Argo Rollouts deployments that link to the underlying artifacts and workflows. diff --git a/_docs/getting-started/faq.md b/_docs/getting-started/faq.md deleted file mode 100644 index bd189aab..00000000 --- a/_docs/getting-started/faq.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -title: "Frequently asked questions" -description: "" -group: getting-started ---- -We have collected a few of the common questions on the Codefresh solution. - -For questions on Codefresh Classic, navigate to our [FAQs for Codefresh Classic](https://codefresh.io/docs/docs/getting-started/faq/){:target="\_blank"}. - - -**Q. What is the Codefresh platform?** - -A. The Codefresh platform is a full-featured, turn-key solution for application deployments and releases. Powered by the Argo Project, Codefresh uses Argo CD, Argo Workflows, Argo Events, and Argo Rollouts, extended with unique functionality and features essential for enterprise deployments. - -**Q. Which deployment environments does Codefresh support?** - -A. The current release of Codefresh supports hosted and hybrid deployment environments. 
Stay tuned for our announcement on support for on-premises deployments. - -**Q. How does Codefresh relate to Open Source Argo?** - -A. Codefresh creates a conformed fork of the Argo Project. You get an enterprise-supported version of the Argo Project comprising Argo Workflows, Argo Events, Argo CD, and Argo Rollouts. You can take advantage of the Argo Project offering, with the extended functionality that Codefresh brings to it. - -**Q. I already have a Kubernetes cluster with Argo CD. Can I install Codefresh on the same cluster?** - -A. Hybrid runtimes must be installed on a clean Kubernetes cluster without any Argo Project components. Because we create a conformed fork of the Argo Project in Codefresh, installing it on a cluster with Argo components creates a conflict that will cause the installation to fail. - -**Q. I have resources on my Kubernetes cluster that I want to use in Codefresh. What should I do?** - -A. We will be giving detailed instructions on migrating resources from Kubernetes clusters to Codefresh-based Kubernetes clusters. - -**Q. Does Codefresh support all Git providers?** -A. At the time of writing, Codefresh supports GitHub. We are working to quickly extend support to GitLab and Bitbucket. Stay tuned. - -**Q. What are the browser requirements for the Codefresh UI?** - -A. Officially, we support the latest version of the Chrome browser. Any browser released in the last couple of years should work without major issues. -The following browser versions are **NOT** supported: - -{: .table .table-bordered .table-hover} -| Browser | Version | Date released | -| -------------- | ---------------------------- |-------------------------| -| Chrome | < 51 | May 2016 | -| Firefox | < 54 | Jun 2017 | -| Edge | < 14 | Aug 2016 | -| Safari | < 10 | Sep 2016 | - - -## Migration from Codefresh Classic - -**Q. I have Codefresh Classic. Can I migrate to Codefresh?** -A. At the time of writing, we are working on making the migration from Codefresh Classic to Codefresh as seamless as possible. Stay tuned for the migration announcement. - diff --git a/_docs/getting-started/gitops.md b/_docs/getting-started/gitops.md deleted file mode 100644 index d7542fbb..00000000 --- a/_docs/getting-started/gitops.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: "GitOps approach" -description: "" -group: getting-started -toc: true ---- - -> In the documentation, Kubernetes and K8s are used interchangeably. - -### GitOps - -The Codefresh platform is built entirely around the concept of GitOps, a set of best practices where the entire code delivery process is controlled via Git, including infrastructure and application definition, and automation to complete updates and rollbacks. - -To fully understand the benefits of Codefresh, let's briefly recap GitOps, and how it can help: - -#### Infrastructure as code, the entire system described declaratively - Infrastructure as code is a modern approach that "declaratively" describes the state of a system as code, while having that single source of truth applied to an end-system. The end-systems in most cases are modern cloud native tools. - - Declarative means that configuration is guaranteed by a set of facts, instead of by a set of instructions. With your end system's declarations versioned in Git, you have a single source of truth. You can then both easily deploy and roll back your end system according to the state changes in Git. And more important, if and when disaster strikes, you can also reproduce your cluster’s infrastructure reliably and quickly. 
- - GitOps is just a specific case of infrastructure as code where the end system is a Kubernetes cluster. - -#### Desired system state versioned in Git - With the declaration of your system stored in a version control system, and serving as your canonical source of truth, you have a single place from which everything is derived and driven. Now not only your application code is in Git, but also all the information required to install and manage your application, including service definition, deployment information, and more. - - Developers can continue with the familiar and convenient approaches they are already using for their applicative code. In addition, Git makes complicated tasks like collaboration (via pull requests), security (via signed commits), permissions (repository permissions), and rollback, as trivial as they can get. - - -#### Use dedicated tools to implement transfer of desired state into the end system - Once the state of your end-system is declared and kept under version control, you need a tool and process to apply the updated desired state into the end system. - - One of the tools for implementing infrastructure as code in the realm of DevOps is [Terraform](https://www.terraform.io/), for example. - - While you can implement GitOps (infrastructure as code for Kubernetes), using a battle-ready tool like Terraform which has a plugin system that also supports Kubernetes, K8s has many nuances that differ from a traditional sync process to a cloud system or some other standard REST API end system. - - To address the specific use cases of Kubernetes, there are new tools dedicated to implementing GitOps (infrastructure as code for k8s), such as [ArgoCD](https://github.com/argoproj/argo-cd). - - diff --git a/_docs/getting-started/main-concepts.md b/_docs/getting-started/main-concepts.md deleted file mode 100644 index c2d4f418..00000000 --- a/_docs/getting-started/main-concepts.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: "Main concepts" -description: "" -group: getting-started -toc: true ---- - -### Built on top of the open source Argo -Codefresh maintains a [conformed](https://github.com/argoproj/argo-conformance-program) fork of the following Argo components, providing an enterprise-supported version of them: -* [Argo CD](https://github.com/argoproj/argo-cd): Declarative continuous deployment for Kubernetes. -* [Argo Rollouts](https://argoproj.github.io/argo-rollouts/): Progressive Delivery for Kubernetes. -* [Argo Workflows](https://github.com/argoproj/argo-workflows): Workflow engine for Kubernetes. -* [Argo Events](https://github.com/argoproj/argo-events): Event-driven workflow automation framework. - -For details, see [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/). - -### Hybrid behind firewall model -Codefresh performs an installation, called a Runtime, on the user's K8s cluster. The Runtime contains all required components for the Codefresh experience. - -For details, see [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/). - -### GitOps native approach -Codefresh is built entirely on the heavily-adopted concept of GitOps. Read the detailed explanation on our [GitOps approach]({{site.baseurl}}/docs/getting-started/gitops/).
-Codefresh leverages Argo components (Argo CD and Argo Events) to apply the entire desired state from Git to the user's K8s cluster, and to report that state back to the Codefresh platform.
-
-### Every state change operation in Codefresh is made via Git
-Codefresh takes the GitOps approach a step further: the entire entity model is controlled through GitOps, meaning that the complete state of your account is maintained in Git. For details, see [entity model]({{site.baseurl}}/docs/getting-started/entity-model/).
-
-Codefresh provides a full front-end experience powered by a strong API layer (GraphQL), and every state change (via GraphQL mutation) performs a commit to Git on behalf of the user.
-
-### Audit log derived from Git changelog
-Codefresh derives its audit log from all operations performed in the system, covering both the Git changelog and the log of API calls made to the system.
-For details, see [audit]({{site.baseurl}}/docs/administration/audit/).
-
-### Access control derived from Git permissions
-Codefresh builds its access control model on top of the existing Git permissions, which are defined externally to the system.
      -For details, see [access control]({{site.baseurl}}/docs/administration/access-control/). diff --git a/_docs/getting-started/quick-start.md b/_docs/getting-started/quick-start.md deleted file mode 100644 index 203c03d0..00000000 --- a/_docs/getting-started/quick-start.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: "Quick start" -description: "" -group: getting-started -toc: true ---- - -Check out our quick start tutorial to get up and running in the Codefresh platform with hosted or hybrid runtimes. - -The tutorial is divided into these sections: -* Provisioning runtimes -* Creating and deploying an application -* Triggering and creating a Delivery Pipeline - -Each section indicates the runtime environment it is relevant to. - -### Provision runtimes -Based on your deployment model, start by provisioning the hosted or hybrid runtime. Hosted and hybrid runtimes can co-exist with each other. - - -#### Hosted -Hosted runtimes are hosted on a Codefresh cluster and managed by Codefresh. You need to provision your hosted runtime once for your account. - -1. [Provision a hosted runtime]({{site.baseurl}}/docs/getting-started/quick-start/install-hosted) - Provision the hosted runtime with a single click, and complete the setup for your hosted environment. - -{::nomarkdown} -
      -{:/} - -#### Hybrid -Hybrid runtimes: Hosted on a customer cluster and managed by the customer. You can provision multiple hybrid runtimes in the same account. - -1. [Prepare for hosted runtime installation]({{site.baseurl}}/docs/getting-started/quick-start/verify-requirements) - Verify your environment matches the requirements for installing Codefresh runtime. -1. [Install hybrid runtime]({{site.baseurl}}/docs/getting-started/quick-start/runtime) - Install the Codefresh runtime by downloading the CLI, installing the runtime, and validate successful installation in the UI - -### Deploy an application - -1. [Create an application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-ui) - Create the `codefresh-guestbook` application in the Codefresh UI. -1. [Create and commit resources for application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs) - Create rollout and service resources, and commit these resources to deploy the `codefresh-guestbook` application. -1. [Update the image tag for application]({{site.baseurl}}/docs/getting-started/quick-start/create-rollout) - Update the image for the `codefresh-guestbook` application to trigger a rollout. - -### Trigger/create a Delivery Pipeline -> Available for hybrid deployments. - -1. [Trigger the Hello World example pipeline]({{site.baseurl}}/docs/getting-started/quick-start/hello-world) - Configure the Git event to trigger the demo pipeline. -1. [Create a basic CI delivery pipeline]({{site.baseurl}}/docs/getting-started/quick-start/create-ci-pipeline) - Create a new CI delivery pipeline in Codefresh. - diff --git a/_docs/getting-started/quick-start/create-app-specs.md b/_docs/getting-started/quick-start/create-app-specs.md deleted file mode 100644 index 1c44be1f..00000000 --- a/_docs/getting-started/quick-start/create-app-specs.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: "Create and commit resources for application" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - -Now that you have created an application, you need to deploy the application. Let's deploy the `codefresh-guestbook` application by creating and commiting resources. -You will create and commit the following resources: -1. A folder in Git to save resources for the application -1. `Rollout` resource defining the deployment strategy -1. `Service` resource to expose the application to external traffic - -### Before you begin -* [Create an application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-ui) -* Make sure [Argo Rollouts is installed]({{site.baseurl}}/docs/deployment/install-argo-rollouts) on the target cluster - -### Create folder in Git for application resources -Create a folder in the Git repo in which to save all the resources for the `codefresh-guestbook` application. - -* In your Git repo, create a folder to store the resources needed to deploy the application. - For example, `/quick-start/` - -### Create rollout.yaml - -Create a rollout resource for the application you want to deploy. - - -To leverage Argo Rollouts' deployment capabilities, we are using the Argo's `rollout` resource instead of the native Kubernetes Deployment object. -For detailed information on the fields you can define, see [Argo Rollout specification](https://argoproj.github.io/argo-rollouts/features/specification/){:target="\_blank"}. - - -* In the Git repository create the `rollout.yaml` file, as in the example below. 
- - -```yaml -apiVersion: argoproj.io/v1alpha1 -kind: Rollout -metadata: - name: codefresh-guestbook-rollout -spec: - replicas: 4 - revisionHistoryLimit: 2 - selector: - matchLabels: - app: codefresh-guestbook - template: - metadata: - labels: - app: codefresh-guestbook - spec: - containers: - - image: gcr.io/heptio-images/ks-guestbook-demo:0.1 - name: codefresh-guestbook - ports: - - name: http - containerPort: 80 - protocol: TCP - minReadySeconds: 30 - strategy: - canary: - steps: - - setWeight: 25 - - pause: {duration: 20s} - - setWeight: 75 - - pause: {duration: 15s} -``` - -#### Fields in `rollout.yaml` - -{: .table .table-bordered .table-hover} -| Rollout Field | Notes | -| -------------- | -------------| -| `replicas` | When deployed, the rollout creates four replicas of the `codefresh-guestbook` application.| -| `revisionHistoryLimit` | The number of replica sets to retain. | -| `matchLabels` | The pods to select for this rollout. In our example, all pods with the label `codefresh-guestbook` are selected.| -| `image` | The container image for the application with the version tag, `gcr.io/heptio-images/ks-guestbook-demo:0.1` in our example.| -| `name` | The name of the application, `codefresh-guestbook` in our example. | -| `canary` | The deployment strategy, `canary` meaning that the traffic is gradually routed to the new application. Starting with `setWeight` of `25%` followed by a `pause` of 20 seconds, and the remaining `75%` after verification.| -| `templateName` | The analysis template used to validate the application metrics. Our example has the `background-analysis` template, and interfaces with Prometheus to monitor and validate metric thresholds.| - - -### Create a service resource -Create a service resource to expose your application to external traffic. - -* Create a `service.yaml` resource for the application you want to deploy, as in the example below. - > Create it in the same folder in which you saved `rollout.yaml`. - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: codefresh-guestbook-svc -spec: - ports: - - port: 8080 - targetPort: 80 - selector: - app: codefresh-guestbook # must be the same as the selector defined in rollouts.yaml - type: LoadBalancer -``` - -#### Fields in `service.yaml` - -{: .table .table-bordered .table-hover} -| Service field | Notes | -| -------------- | -------------- | -| `spec.ports` | The internal `port`, 8080 in our example, and external `targetPort`, 80 in our example.| -| `selector.app` | The pods to select, and MUST be identical to that defined in `rollouts.yaml`, `codefresh-guestbook` in our example.| - -### View application resources in Codefresh -Once you create and commit the `rollout` and `service` resources, return to the Applications dashboard. The Current State to see these resources. - -1. In the Codefresh UI, go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard?sort=desc-lastUpdated){:target="\_blank"}. -1. Select the application. - The Current State tab is now populated with the `rollout` and `service` resources you added. 
- - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-app-current-state.png" - url="/images/getting-started/quick-start/cdops-app-current-state.png" - alt="Current State with resources for application" - caption="Current State with resources for application" - max-width="70%" - %} - -### What to do next - -[(Optional) Update image tag for application]({{site.baseurl}}/docs/getting-started/quick-start/create-rollout) \ No newline at end of file diff --git a/_docs/getting-started/quick-start/create-app-ui.md b/_docs/getting-started/quick-start/create-app-ui.md deleted file mode 100644 index b2fdff6b..00000000 --- a/_docs/getting-started/quick-start/create-app-ui.md +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: "Create an application" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - -Let's start by creating a simple application, the `codefresh-guestbook` application in the Codefresh UI. -We'll create the application without resources and then define/add resources in the next step. - - -For detailed information, see [Create an application]({{site.baseurl}}/docs/deployment/create-application). - - -**How to** - - -1. In the Codefresh UI, go to the [Applications](https://g.codefresh.io/2.0/applications-dashboard?sort=desc-lastUpdated){:target="\_blank"} dashboard. -1. Select **Add Application** on the top-right. -1. In the Add Application panel, add definitions for the application: - * **Application name**: `codefresh-guestbook` for the quick start. - * **Runtime**: The runtime to associate with the application, `hosted-runtime` for the quick start. - * **Name for YAML file**: The name of the application's configuration manifest, assigned on commit to Git. By default, the manifest is assigned the application name. - You can click the Edit icon and change the name, if needed. - - >You cannot change the application definitions once you continue to the Configuration settings. - -{% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-add-app-settings.png" - url="/images/getting-started/quick-start/cdops-add-app-settings.png" - alt="Add Application panel" - caption="Add Application panel" - max-width="50%" - %} - -{:start="4"} -1. Select **Next** to go to the Configuration tab. - By default you are in Form mode. You can toggle between the Form and YAML modes as you define the application's configuration settings. -1. Define the **General** settings for the application: - * **Repository URL**: The URL to the repo in Git where you created the YAML resource files for the application. - * **Revision**: The branch in Git with the resource files. - * **Path**: The folder in Git with the resource files. - * **Namespace**: Optional. For the quick start, we'll create a namespace for the application, entitled `quick-start`. - * **Sync Policy**: Change to **Automatic**, and select **Prune resources** to automatically remove unused resources. - * **Sync Options**: If you defined a namespace, select **Auto-create namespace** to ensure that the namespace is created if it doesn't exist. - -{% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-add-app-configuration.png" - url="/images/getting-started/quick-start/cdops-add-app-configuration.png" - alt="Add Application Quick Start: General settings" - caption="Add Application Quick Start: General settings" - max-width="70%" - %} - - -{:start="6"} -1. Retain the default **Advanced Settings**. -1. 
To commit all your changes, select **Commit**. - The Commit form is displayed with the application's definitions on the left, and the read-only version of the manifest with the configuration settings you defined on the right. -1. Select the **Git Source** to which to commit. - -{% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-add-app-commit.png" - url="/images/getting-started/quick-start/cdops-add-app-commit.png" - alt="Add Application Quick Start: Commit to Git" - caption="Add Application Quick Start: Commit to Git" - max-width="70%" - %} - -{:start="9"} -1. Add a commit message and then select **Commit** at the bottom-right of the panel. - You are directed to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard?sort=desc-lastUpdated){:target="\_blank"}. - You may have to wait for a few seconds until the application is synced to the cluster. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-add-app-dashboard.png" - url="/images/getting-started/quick-start/cdops-add-app-dashboard.png" - alt="Application dashboard with new application" - caption="Application dashboard with new application" - max-width="70%" - %} - -{:start="10"} -1. Select the application. The Current State tab does not display any resources as we have not created any resources for the application. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-app-empty-current-state.png" - url="/images/getting-started/quick-start/cdops-app-empty-current-state.png" - alt="Empty Current State for new application" - caption="Empty Current State for new application" - max-width="70%" - %} - - -In the next task, you will create and commit resources for the `codefresh-guestbook` application and deploy the application. - - -### What to do next -[Create and commit resources for application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs/) diff --git a/_docs/getting-started/quick-start/create-ci-pipeline.md b/_docs/getting-started/quick-start/create-ci-pipeline.md deleted file mode 100644 index 5a297b78..00000000 --- a/_docs/getting-started/quick-start/create-ci-pipeline.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: "Create a basic CI delivery pipeline" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - -Now that you have configured and run the Hello World demo pipeline, let's create a more advanced pipeline. - -For the quick start, you'll create a basic CI Delivery Pipeline in Codefresh. - -The Delivery Pipeline: -* Clones a Git repository -* Builds a docker image using `kaniko` -* Pushes the built image to a Docker Registry -* Runs an example testing step -* Sends the image information to Codefresh - -Our CI pipeline interacts with third-party services such as GitHub and a Docker Registry. You need to first add secrets to the cluster to store the credentials required. - - -### Create a Personal Access Token (PAT) -You must have a PAT to clone the repository. - - -1. Create your PAT (Personal Access Token) with a valid `expiration` date and `scope`. - Scopes: `repo` and `admin-repo.hook` - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - url="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - alt="GitHub PAT permissions for CI pipeline" - caption="GitHub PAT permissions for CI pipeline" - max-width="30%" - %} - -{:start="2"} -1. 
Define your PAT and namespace by replacing the values in these commands: - -``` - export GIT_TOKEN=[PAT token] - export NAMESPACE=[Codefresh runtime namespace] -``` - -1. Create a generic Kubernetes secret with your PAT token: - -``` -kubectl create secret generic github-token \ - --from-literal=token=$GIT_TOKEN --dry-run=client \ - --save-config -o yaml | kubectl apply -f - -n $NAMESPACE -``` - -### Create Docker-registry secret -To push the image to a Docker registry, we'll need the credentials on our cluster. - -> The Docker registry secret is different from the general registry secret. - -1. Export the values for the Docker registry's `server`, `username`, `password`, `email`, and `namespace`: - -``` -export DOCKER_REGISTRY_SERVER=[Server] -export DOCKER_USER=[Username] -export DOCKER_PASSWORD=[Password] -export DOCKER_EMAIL=[Email] -export NAMESPACE=[Codefresh runtime namespace] -``` - -{:start="2"} -1. Create the secret: - -``` -kubectl create secret docker-registry \ - --docker-server=$DOCKER_REGISTRY_SERVER \ - --docker-username=$DOCKER_USER \ - --docker-password=$DOCKER_PASSWORD \ - --docker-email=$DOCKER_EMAIL -n $NAMESPACE -``` - - > In the Workflow Template, the Docker registry name defaults to `docker-config`. - - -### Create general registry secret -Create a general registry secret to send the image information to Codefresh. - -1. Export the values for your registry's `username`, `password`, `domain`, and `namespace`: - -``` -export USER=[Username] -export PASSWORD=[Password] -export DOMAIN=[Domain] -export NAMESPACE=[Codefresh runtime namespace] -``` - -{:start="2"} -1. Create the secret: - -``` -kubectl create secret generic registry-creds \ - --from-literal=username=$USER \ - --from-literal=password=$PASSWORD \ - --from-literal=domain=$DOMAIN \ - --dry-run=client --save-config -o yaml | kubectl apply -f - -n $NAMESPACE -``` - -### Create the CI delivery pipeline -Now that you have defined the secrets, create the CI delivery pipeline in Codefresh. - -1. In the UI, go to [Delivery Pipelines](https://g.codefresh.io/2.0/pipelines){:target="\_blank"}. -1. Select **+ Add Delivery Pipeline**. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-new-pipeline.png" - url="/images/getting-started/quick-start/quick-start-new-pipeline.png" - alt="Add Delivery Pipeline panel in Codefresh" - caption="Add Delivery Pipeline panel in Codefresh" - max-width="30%" - %} - -{:start="3"} -1. Enter a name for the delivery pipeline. - The name is created from the names of the sensor and the trigger event for the delivery pipeline. - * **Sensor Name**: The name of the sensor resource, for example, `sensor-cf-ci`. - * **Trigger Name**: The event configured in the sensor to trigger the Workflow Template, for example, `push-cf-ci`. -1. From the list of **Git Sources**, select the Git Source to which to commit the resources for this delivery pipeline. - > Do not select the marketplace Git Source as you cannot commit to it. - If you have multiple runtimes installed, the Git Source you select also determines the runtime that executes the pipeline. -1. Select **Next**. - In the **Configuration** tab, **Workflow Templates** is selected. This is our CI Starter Workflow Template, that builds a Docker image using Kaniko, reports image metadata to Codefresh, and tests the image. -1. Select **Trigger Conditions**. -1. From the **Add** dropdown, select **Git Events**. -1. 
In the **Git Repository URLs** field, select one or more GitHub repositories to listen to for the selected event. -1. From the **Event** dropdown, select the event, in our case, **Commit pushed**. - Codefresh displays all the **Arguments** available for the selected event. - You can map each argument to a single or combination of predefined variables, which Codefresh automatically maps to the correct path when you commit the changes. Argo Workflow then instantiates the values from the event payload. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-ci-pipeline-arguments.png" - url="/images/getting-started/quick-start/quick-start-ci-pipeline-arguments.png" - alt="Predefined variables for arguments" - caption="Predefined variables for arguments" - max-width="30%" - %} - - In each field, type `$` and from the list of predefined variables, select each of these in turn: - - * **REPO**: Required. The repository to clone during the build step. Select `Repository name`. - * **IMAGE_NAME**: Required. The name for the built image. Enter the name in the format `([docker_url]/[account]/[image_name]`. - * **TAG**: Optional. The tag for the built image. If not defined, uses the default tag `latest`. Enter `1.0`. - * **GIT_REVISION**: Optional. The Git revision to report to Codefresh. Select `Git revision`. - * **GIT_BRANCH**: Optional. The Git branch to report to Codefresh. Select `Git branch`. - * **GIT_COMMIT_URL**: Optional. The Git commit URL to report to Codefresh. Select `Commit url`. - * **GIT_COMMIT_MESSAGE**: Optional. The Git commit message to report to Codefresh. Select `Commit message`. - - You are now ready to commit the delivery pipeline to the Git Source. - -{:start="10"} -1. Select **Apply**, and then **Commit** on the top-right. - The Commit Changes panel shows the files to be committed. -1. Enter the commit message and then select **Commit**. -1. In the **Delivery Pipelines** page to which you are redirected, verify that your pipeline is displayed. - - Behind the scenes, we committed the pipeline to your Git repository and synced the resources to your cluster. - It may take a few seconds for the Git-to-cluster sync to complete, and then your pipeline should be displayed. - -### Trigger the pipeline with a Git commit event -Make a change to a file in the Git repository to trigger the pipeline. - -1. Go to the Git repository selected for the trigger condition. -1. Make a change to any file to get a commit event. -1. In the UI, go back to [Delivery Pipelines](https://g.codefresh.io/2.0/pipelines){:target="\_blank"} to see the new workflow for the pipeline. - -Continue to tweak the pipeline and enhance its capabilities. 
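-
-If you prefer working from a terminal, an empty commit is enough to generate the push event. The sketch below assumes `origin` points to the repository you selected in the trigger condition, and that `main` is its default branch:
-
-```shell
-# Trigger the delivery pipeline without changing any files
-git checkout main
-git pull origin main
-git commit --allow-empty -m "Trigger CI delivery pipeline"
-git push origin main
-```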
- - -### What to do next -If you have not created an application in Codefresh, continue with: - -[Create resources for codefresh-guestbook application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs) - diff --git a/_docs/getting-started/quick-start/create-github-action-ci.md b/_docs/getting-started/quick-start/create-github-action-ci.md deleted file mode 100644 index b95544b6..00000000 --- a/_docs/getting-started/quick-start/create-github-action-ci.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -title: "Connect a GitHub Action CI to enrich image" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- \ No newline at end of file diff --git a/_docs/getting-started/quick-start/create-rollout.md b/_docs/getting-started/quick-start/create-rollout.md deleted file mode 100644 index 763df47a..00000000 --- a/_docs/getting-started/quick-start/create-rollout.md +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: "Update image tag for application" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - -You will now make a change in the application manifest, and update the image tag. Because we selected auto-sync in the application settings, Argo CD detects that the live state in the cluster is out of sync with the desired state in Git, and triggers the new rollout. - -### Before you begin - -* [Create resources for application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs/) - - -### Update image tag in rollout.yaml -Update the image tag in the `codefresh-guestbook` application. - -1. Go to the Git repo with `rollout.yaml`. -1. Update the image tag from `0.1` to `0.2` as in the example below. - -```yaml -... -template: - metadata: - labels: - app: codefresh-guestbook - spec: - containers: - - image: gcr.io/heptio-images/ks-guestbook-demo:0.2 - name: codefresh-guestbook - ports: - - name: http - containerPort: 80 - protocol: TCP -... -``` -{:start="3"} -1. Commit the change. - -### View the rollout in the Applications dashboard -When the image tag is updated, the auto-sync initiates the rollout. - -1. Go back to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard?sort=desc-lastUpdated){:target="\_blank"}. -1. Select the application you created. - The deployment entry for the application is displayed as progressing. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-app-rollout-in-dashboard.png" - url="/images/getting-started/quick-start/cdops-app-rollout-in-dashboard.png" - alt="Application dashboard with rollout in progress" - caption="Application dashboard with rollout in progress" - max-width="60%" - %} - -{:start="3"} -1. To visualize the rollout analysis, click the rollout name. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-app-rollout-panel.png" - url="/images/getting-started/quick-start/cdops-app-rollout-panel.png" - alt="Rollout analysis in progress" - caption="Rollout analysis in progress" - max-width="60%" - %} - -{:start="4"} -1. To view metric validation details, expand **Background Analysis** in the panel. - -You have created and deployed an application in Codefresh. 
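-
-If the Argo Rollouts `kubectl` plugin is installed on the target cluster, you can also follow the canary steps from the terminal. This is only a sketch; it assumes the `quick-start` namespace created earlier and the rollout name defined in `rollout.yaml`:
-
-```shell
-# Watch the canary progress through the 25% -> 75% -> 100% weight steps
-kubectl argo rollouts get rollout codefresh-guestbook-rollout \
-  --namespace quick-start --watch
-```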
- - \ No newline at end of file diff --git a/_docs/getting-started/quick-start/hello-world.md b/_docs/getting-started/quick-start/hello-world.md deleted file mode 100644 index 02011074..00000000 --- a/_docs/getting-started/quick-start/hello-world.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "Trigger the Hello World example pipeline" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - -Now that you have successfully installed the hybrid runtime, you can trigger one of the Hello World demo pipelines included in the runtime package. -The two Hello World example pipelines are triggered by different event conditions: -* Git (GitHub) event -* Calendar (cron) event - -For the quick start, let's focus on the `github/hello-world` pipeline. - -### Create a PAT token -To commit resources for the `github/hello-world` pipeline, you need to add a PAT to Codefresh. - -1. Create your personal token with a valid `expiration` date and `scope` with `base64` encoding. - For the pipeline, you need `repo` and `admin-repo.hook` scopes: - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - url="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - alt="GitHub PAT permissions for Hello World pipeline" - caption="GitHub PAT permissions for Hello World pipeline" - max-width="30%" - %} - -{:start="2"} -1. In the Codefresh UI, go to [User Settings](https://g.codefresh.io/2.0/user-settings){:target="\_blank"}, add your token. - -### View pipelines -View the pipelines in Codefresh. - -1. In the Codefresh UI, go to [Delivery Pipelines](https://g.codefresh.io/2.0/pipelines){:target="\_blank"}. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-pipelines.png" - url="/images/getting-started/quick-start/quick-start-pipelines.png" - alt="Demo pipelines in the Pipelines page" - caption="Demo pipelines in the Pipelines page" - max-width="30%" - %} - - * The `github/hello-world` pipeline has not been triggered as it requires a Git event to trigger it. - * The `cron/hello-world` pipeline shows statistics as it has already been triggered based on the `cron` interval. - -### View and update manifest -As we don't have a workflow for this pipeline, you will configure the Git Source resource in the pipeline's **Manifest** tab. -1. In the **Pipelines** page, to drill down, select the pipeline name. -1. Select the **Manifest** tab, and click the arrowhead to expand the resource view. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-manifest-expand.png" - url="/images/getting-started/quick-start/quick-start-manifest-expand.png" - alt="Expand resource view in Mainfests tab" - caption="Expand resource view in Mainfests tab" - max-width="30%" - %} - - You can see these resources: - - * Event Source (`event-source.git-source.yaml`). - * Sensor (`sensor.git-source.yaml`) - * Workflow Template (`workflow-template.hellow-world.yaml`) - - - > The pipeline is configured to run on a `PUSH` event in the Git repository. - - -Codefresh does the following: -* Commits the changes to your Git repository. -* Synchronizes the changes in Git back to your cluster, and updates the `event-source.git-source` resource. -* Triggers this pipeline after the `PUSH` event to your repository. -* Creates a workflow. View it in the UI, in the [Workflows](https://g.codefresh.io/2.0/workflows){:target="\_blank"} dashboard. 
- Select view workflow details to see the workflow log. - -### What to do next -[Create a basic CI pipeline]({{site.baseurl}}/docs/getting-started/quick-start/create-ci-pipeline) diff --git a/_docs/getting-started/quick-start/install-hosted.md b/_docs/getting-started/quick-start/install-hosted.md deleted file mode 100644 index 8d3c6fbf..00000000 --- a/_docs/getting-started/quick-start/install-hosted.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Provision a hosted runtime" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - -If you have Hosted GitOps, set up your hosted runtime environment: - -1. Provision the hosted runtime with a single click -1. Authorize access through your OAuth token to the organization where Codefresh creates the Git runtime repo and the shared configuration repo -1. Connect to an external K8s cluster with access to the internet, to which you can deploy applications -1. Install Argo Rollouts on the cluster - -Read our [blog on Hosted GitOps](https://codefresh.io/blog/codefresh-upends-continuous-delivery-with-hosted-gitops-platform-featuring-dora-dashboards-and-first-class-integrations-for-ci/). -For detailed information on each of the steps below, see [Set up a hosted runtime environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -**Before you begin** - -Verify the following: -* If you have hybrid runtimes installed, make sure you have latest version of the CLI - * Check version: - `cf version` - To compare with the latest version from Codefresh, [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"}. - * [Download the CLI]({{site.baseurl}}/docs/clients/csdp-cli/). -* Kubernetes cluster with access to the internet -* OAuth token - -**How to** -1. In the Codefresh UI, go to Codefresh [Home](https://g.codefresh.io/2.0/?time=LAST_7_DAYS){:target="\_blank"}. - -{% include -image.html -lightbox="true" -file="/images/runtime/hosted-initial-view.png" -url="/images/runtime/hosted-initial-view.png" -alt="Hosted GitOps setup" -caption="Hosted GitOps setup" -max-width="80%" -%} - -{:start="2"} -1. Provision the hosted runtime: - * Click **Install**, and wait for Codefresh to complete provisioning your hosted runtime (may take up to ten minutes). - -{% include -image.html -lightbox="true" -file="/images/runtime/hosted-installing.png" -url="/images/runtime/hosted-installing.png" -alt="Installing hosted runtime" -caption="Installing hosted runtime" -max-width="80%" -%} - -{:start="3"} -1. Select the Git organization for the runtime installation and shared configuration repos: - * Click **Connect**. - * Click **Authorize Access** and enter your OAuth token. - * Select the **Git Organization for which to create the repos**. - * Click **Create**. - Codefresh creates the two Git repositories in the paths shown. - - {% include -image.html -lightbox="true" -file="/images/runtime/hosted-connect-git.png" -url="/images/runtime/hosted-connect-git.png" -alt="Connect to Git provider" -caption="Connect to Git provider" -max-width="80%" -%} - -{:start="4"} -1. Connect a K8s cluster: - * Click **Connect**. - * In the Add Managed Cluster panel, copy the command `cf cluster add`, and run it in the terminal. - * When prompted to select the `kube-context`, select from the list of available clusters as defined in `kubeconfig`. - * Verify that you have configured access to the required IP addresses required. See [Codefresh IP addresses]({{site.baseurl}}/docs/administration/platform-ip-addresses/). 
- -{% include -image.html -lightbox="true" -file="/images/runtime/hosted-connect-cluster-step.png" -url="/images/runtime/hosted-connect-cluster-step.png" -alt="Connect a K8s cluster for hosted runtime" -caption="Connect a K8s cluster for hosted runtime" -max-width="70%" -%} - -1. Install Argo Rollouts on the cluster you added. You'll need this to apply the `rollout` resource we will create for the application in the next task. - * Go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. - * Select **Topology View**. - * Select the target cluster, and then select **+ Install Argo Rollouts**. - -{% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/cdops-app-install-rollout.png" - url="/images/getting-started/quick-start/cdops-app-install-rollout.png" - alt="Install Argo Rollouts on managed cluster" - caption="Install Argo Rollouts on managed cluster" - max-width="50%" - %} - -### What to do next -[Create resources for codefresh-guestbook application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs) - diff --git a/_docs/getting-started/quick-start/runtime.md b/_docs/getting-started/quick-start/runtime.md deleted file mode 100644 index 9ac99d86..00000000 --- a/_docs/getting-started/quick-start/runtime.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: "Install a hybrid runtime" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - - -Install the hybrid runtime on your K8s cluster. Installing the hybrid runtime installs Argo-project and Codefresh-specific components. The Argo Project is an enterprise-supported version of the Argo CD components, derived from a conformed fork of the Argo ecosystem. - -### About hybrid runtime installation -Installing a hybrid runtime includes installing the: -1. Codefresh CLI. -2. Codefresh hybrid runtime from the CLI in a specific namespace on your cluster. - Every hybrid runtime installation makes commits to two Git repos: - * Runtime installation repo: The installation repo that manages the runtime itself with Argo CD. If the repo URL you provide does not exist, the runtime creates it automatically. - * Git Source repo: Created automatically during runtime installation. The repo with the demo resources required for the sample `Hello World` pipelines we provide. - * Shared configuration repo: A repository that stores configuration manifests shared across runtimes. - -### Before you begin -A hybrid runtime requires a Git token for authentication to the Git installation repository. -Have your GitHub Personal Authentication Token (PAT) ready with a valid expiration date and access permissions: -* Expiration: Either the default of 30 days or any duration you consider logical. -* Access scopes: Set to `repo` and `admin-repo.hook` - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - url="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - alt="GitHub PAT permissions" - caption="GitHub PAT permissions" - max-width="30%" - %} - - If you need detailed information on GitHub tokens, see the [GitHub article](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token). - -### Download the Codefresh CLI -Downloading the Codefresh CLI requires you to select the download mode and OS, generate an API key, and authentication context, as instructed in the UI. -1. In the Welcome page, select **+ Install Runtime**. -1. 
Download the Codefresh CLI: - * Select one of the methods. - * Generate the API key and create the authentication context. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-download-cli.png" - url="/images/getting-started/quick-start/quick-start-download-cli.png" - alt="Download Codefresh CLI to install runtime" - caption="Download Codefresh CLI to install runtime" - max-width="30%" - %} -### Install hybrid runtime -For the quick start, install the hybrid runtime through the Codefresh CLI that you downloaded previously. - -1. To start runtime installation, run `cf runtime install`. - >If you don't have a valid SSL certificate for the Ingress controller, and want to continue with the installation, add the `--insecure` flag to the runtime command. -1. Follow the prompts in the CLI wizard to complete the installation: - * **Runtime name**: The name of your runtime, starting with a lower-case character, and including up to 63 characters and numbers. Example: `codefreshproduction` - * **Select Kube context**: Your current context is highlighted. Press Enter to select it, or use the arrow keys to select a different context. - * **Ingress class**: Select the ingress class for runtime installation from the list displayed. - * **Ingress host**: Displays the NGINX host, either from the cluster or the NGINX ingress controller associated with the **Ingress class**. - * **Repository URL**: The GitHub repo for the installation definitions, in the format `https://github.com/[user-or-org-name]/[repo_name]`. Example: `https//:github.com/codefresh/cf_production_install` - * **Git runtime token**: The GitHub PAT for access to the installation repo. - * **Install Codefresh demo resources?** Press Enter to confirm. Demo resources are saved in a new Git Source repo, created by Codefresh. They include resources for two Hello World pipelines, one with a Cron trigger condition, and the other with a Git event trigger condition. - * **Do you wish to continue with runtime install?** Press Enter to confirm and start runtime installation. -1. Wait for the runtime installed successfully message. - -### Validate successful installation -The **Runtimes** dashboard shows the hybrid runtime you just installed. You can drill down into the runtime to see its components and Git Sources. - -1. In the Codefresh UI, go to the [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"} dashboard. - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-runtime-dashboard.png" - url="/images/getting-started/quick-start/quick-start-runtime-dashboard.png" - alt="Runtime dashboard after successful installation" - caption="Runtime dashboard after successful installation" - max-width="30%" - %} - -{:start="2"} -1. Select the runtime name to drill down, and then select the tabs to view the runtime components and Git Sources. 
- - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-runtime-components.png" - url="/images/getting-started/quick-start/quick-start-runtime-components.png" - alt="Runtime components tab" - caption="Runtime Components tab" - max-width="30%" - %} - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-git-source.png" - url="/images/getting-started/quick-start/quick-start-git-source.png" - alt="Git Source tab" - caption="Git Source tab" - max-width="30%" - %} - -### What to do next -[Create resources for codefresh-guestbook application]({{site.baseurl}}/docs/getting-started/quick-start/create-app-specs) -OR -[Trigger the Hello World example pipeline]({{site.baseurl}}/docs/getting-started/quick-start/hello-world) diff --git a/_docs/getting-started/quick-start/verify-requirements.md b/_docs/getting-started/quick-start/verify-requirements.md deleted file mode 100644 index 3fc48c3e..00000000 --- a/_docs/getting-started/quick-start/verify-requirements.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "Prepare for hybrid runtime installation" -description: "" -group: getting-started -sub-group: quick-start -toc: true ---- - - -**New installation** -If this is your first time installing Codefresh, review and confirm that your deployment environment conforms to the minimum requirements for hybrid runtime installation. Check the [system requirements]({{site.baseurl}}/docs/runtime/requirements). - -**Existing installation** -If you already have a hybrid runtime installation on your cluster, you have two options: -1. To install on the same cluster, first uninstall the existing hybrid runtime. Currently, you can install a single hybrid runtime per cluster. -1. Install on a different cluster, verifying that you meet the minimum requirements. - -**Uninstallation tips for existing runtimes** -* Before you run uninstall an existing hybrid runtime from the Codefresh UI, or run `cf runtime uninstall` from the CLI, _delete_ all Codefresh-related namespaces. -* If a namespace is frozen in the `Terminating` status, it could be because the namespace has resources with finalizers that are preventing deletion. - Here's how you can remove finalizers using `k9s`: - * In the `applications` view, do the following for each application: - * Hit `e` to edit the YAML. - * Scroll down to the section entitled `finalizers`. - * Move cursor to the line with the finalizer definition, and then hit `dd` to delete the line. - * Delete also the `finalizers` key. - * To save and exit, hit `escape` `wq:` `enter`. - * Try deleting the namespace again. - -### What to do next -[Install a hybrid runtime]({{site.baseurl}}/docs/getting-started/quick-start/runtime) diff --git a/_docs/incubation/intro-hosted-runtime.md b/_docs/incubation/intro-hosted-runtime.md deleted file mode 100644 index c2a66b69..00000000 --- a/_docs/incubation/intro-hosted-runtime.md +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: "Hosted GitOps" -description: "" -group: incubation -toc: true ---- - - -Codefresh has enhanced our solution offering with Hosted GitOps, the SaaS version of Codefresh. - -What do you get with Hosted GitOps? -In a nutshell, a hosted and managed version of Argo CD. From application analytics, to application creation, rollout, and deployment, you get the best of both worlds: Argo CD with unique features and functionality from Codefresh to help achieve your CD goals. -What it also means is easy set up and zero maintenance overhead. 
- -Read on for more details. And check out our [blog](https://codefresh.io/blog/codefresh-upends-continuous-delivery-with-hosted-gitops-platform-featuring-dora-dashboards-and-first-class-integrations-for-ci/). - -### Hosted runtimes - -Setting up your hosted environment takes just a few clicks. All you need is a Codefresh account, a Git account, and a Kubernetes cluster to which to deploy your applications. -Codefresh guides you through the simple three-step process of provisioning your hosted runtime. From that point, Codefresh handles administration and maintenance of the hosted runtime, including version and security updates. - -See [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -{% include - image.html - lightbox="true" - file="/images/runtime/intro-hosted-hosted-initial-view.png" - url="/images/runtime/intro-hosted-hosted-initial-view.png" - alt="Hosted runtime setup" - caption="Hosted runtime setup" - max-width="80%" -%} - -### Global deployment analytics - -The Home dashboard presents enterprise-wide deployment highlights, making it a useful management tool. - -Get insights into important KPIs and deployments, across runtimes and clusters, all in the same location. View status of runtimes and managed clusters, deployments, failed deployments with rollbacks, most active applications. Use filters to narrow the scope to focus on anything specific. - -{% include - image.html - lightbox="true" - file="/images/incubation/home-dashboard.png" - url="/images/incubation/home-dashboard.png" - alt="Global deployment analytics" - caption="Global deployment analytics" - max-width="80%" -%} - -### Application analytics and analysis - -The Applications dashboard displays applications across runtimes and clusters, from which you can select and analyze individual applications. Individual application information is grouped by current and historical deployments, enriched with Argo, Jira, and Git details, including rollout visualizations for ongoing deployments (Timeline tab), and an interactive tree view of application resources (Current State tab). - -See [Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/). - -{% include - image.html - lightbox="true" - file="/images/applications/app-dashboard-main-view.png" - url="/images/applications/app-dashboard-main-view.png" - alt="Applications dashboard" - caption="Applications dashboard" - max-width="80%" -%} - -### DORA metrics - -DORA metrics has become integral to enterprises wanting to quantify DevOps performance, and Codefresh has out-of-the-box support for it. - -Apart from the metrics themselves, the DORA dashboard in Codefresh has several features such as the Totals bar with key metrics, filters that allow you to pinpoint just which applications or runtimes are contributing to problematic metrics, and the ability to set a different view granularity for each DORA metric. - -See [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/). - -{% include - image.html - lightbox="true" - file="/images/incubation/intro-dora-metrics.png" - url="/images/incubation/intro-dora-metrics.png" - alt="DORA metrics" - caption="DORA metrics" - max-width="60%" -%} - -### Application management - -Manage the application lifecycle in the Codefresh UI, from creating, editing, and deleting them. - -Define all application settings in a single location through the intuitive Form mode or directly in YAML, and commit all changes to Git. 
-For easy access, the configuration settings are available for editing in the Applications dashboard. - -See [Applications]({{site.baseurl}}/docs/deployment/create-application/). - -{% include - image.html - lightbox="true" - file="/images/applications/add-app-general-settings.png" - url="/images/applications/add-app-general-settings.png" - alt="Application creation in Codefresh" - caption="Application creation in Codefresh" - max-width="60%" -%} - -### Third-party CI integrations - -If you have your own tools for CI pipelines and workflows, Hosted GitOps gives you the option to connect them to Codefresh and enrich deployment information with our new report image template. For example, you can add the report image step in your GitHub Actions pipeline and reference the different integrations for Codefresh to retrieve and enrich the image information. - -* Git PRs (Pull Requests), Commits, and Committer information directly from the code repositories -* Jira ticket information for correlation with deployed features -* Docker Hub or Quay for image information - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -{% include - image.html - lightbox="true" - file="/images/incubation/github-action-int-settings.png" - url="/images/incubation/github-action-int-settings.png" - alt="Image enrichment with GitHub Actions integration" - caption="Image enrichment with GitHub Actions integration" - max-width="60%" -%} - -### Hosted vs. hybrid environments - -The table below highlights the main differences between hosted and hybrid environments. - -{: .table .table-bordered .table-hover} -| Functionality |Feature | Hosted | Hybrid | -| -------------- | --------------|--------------- | --------------- | -| Runtime | Installation | Provisioned by Codefresh | Provisioned by customer | -| | Runtime cluster |Managed by Codefresh | Managed by customer | -| | Number per account | Only one runtime | Multiple runtimes | -| | Upgrade | Managed by Codefresh | Managed by customer | -| | External cluster | Managed by customer | Managed by customer | -| | Uninstall | Managed by customer | Managed by customer | -| Argo CD | | Codefresh cluster | Customer cluster | -| CI Ops | Delivery Pipelines |Not supported | Supported | -| |Workflows | Not supported | Supported | -| |Workflow Templates | Not supported | Supported | -| CD Ops |Applications | Supported | Supported | -| |Image enrichment | Supported | Supported | -| | Rollouts | Supported | Supported | -|Integrations | | Supported | Supported | -|Dashboards |Home Analytics | Hosted runtime and deployments|Runtimes, deployments, Delivery Pipelines | -| |DORA metrics | Supported |Supported | -| |Applications | Supported |Supported | diff --git a/_docs/installation/codefresh-on-prem-upgrade.md b/_docs/installation/codefresh-on-prem-upgrade.md new file mode 100644 index 00000000..335b3075 --- /dev/null +++ b/_docs/installation/codefresh-on-prem-upgrade.md @@ -0,0 +1,575 @@ +--- +title: "Codefresh On-Premises Upgrade" +description: "Use the Kubernetes Codefresh Installer to upgrade your Codefresh On-Premises platform " +group: installation +redirect_from: + - /docs/enterprise/codefresh-on-prem-upgrade/ +toc: true +--- +Upgrade the Codefresh On-premises platform to the latest version: +* Prepare for the upgrade: _Before_ the upgrade, based on the version you are upgrading to, complete the required tasks +* Upgrade On-premises +* Complete post-upgrade configuration: If needed, also based on the version you are upgrading 
to, complete the required tasks + + +## Upgrade to 1.1.1 +Prepare for the upgrade to v1.1.1 by performing the tasks listed below. + +### Maintain backward compatibility for infrastructure services +If you have Codefresh version 1.0.202 or lower installed, and are upgrading to v1.1.1, to retain the existing images for the services listed below, update the `config.yaml` for `kcfi`. + +* `cf-mongodb` +* `cf-redis` +* `cf-rabbitmq` +* `cf-postgresql` +* `cf-nats` +* `cf-consul` + +> In the `config.yaml`, as in the example below, if needed, replace the `bitnami` prefix with that of your private repo. + +```yaml +... + +global: + ### Codefresh App domain name. appUrl is manadatory parameter + appUrl: onprem.mydomain.com + appProtocol: https + + mongodbImage: bitnami/mongodb:3.6.13-r0 # (default `mongodbImage: bitnami/mongodb:4.2`) + +mongodb: + image: bitnami/mongodb:3.6.13-r0 # (default `image: bitnami/mongodb:4.2`) + podSecurityContext: + enabled: true + runAsUser: 0 + fsGroup: 0 + containerSecurityContext: + enabled: false + +redis: + image: bitnami/redis:3.2.9-r2 # (default `image: bitnami/redis:6.0.16`) + podSecurityContext: + enabled: false + containerSecurityContext: + enabled: false + +postgresql: + imageTag: 9.6.2 # (default `imageTag:13`) + +nats: + imageTag: 0.9.4 # (default `imageTag:2.7`) + +consul: + ImageTag: 1.0.0 # (default `imageTag:1.11`) +... +``` +## Upgrade to 1.2.0 and higher +This major release **deprecates** the following Codefresh managed charts: +* Ingress +* Rabbitmq +* Redis + +See the instructions below for each of the affected charts. + +> Before the upgrade remove any seed jobs left from previous release with: + `kubectl delete job --namespace ${CF_NAMESPACE} -l release=cf ` + +> Before the upgrade remove PDBs for Redis and RabbitMQ left from previous release with: + `kubectl delete pdb cf-rabbitmq --namespace ${CF_NAMESPACE}`
      + `kubectl delete pdb cf-redis --namespace ${CF_NAMESPACE}` + +### Update configuration for Ingress chart +From version **1.2.0 and higher**, we have deprecated support for `Codefresh-managed-ingress`. +Kubernetes community public `ingress-nginx` chart replaces `Codefresh-managed-ingress` chart. For more information on the `ingress-nginx`, see [kubernetes/ingress-nginx](https://github.com/kubernetes/ingress-nginx). + +> Parameter locations have changed as the ingress chart name was changed from `ingress` to `ingress-nginx`: + **NGINX controller** parameters are now defined under `ingress-nginx` + **Ingress object** parameters are now defined under `ingress` + +You must update `config.yaml`, if you are using: +* External ingress controllers, including ALB (Application Load Balancer) +* Codefresh-managed ingress controller with _custom_ values + +#### Update configuration for external ingress controllers + +For external ingress controllers, including ALB (Application Load Balancer), update the relevant sections in `config.yaml` to align with the new name for the ingress chart: + +* Replace `ingress` with `ingress-nginx` + +*v1.1.1 or lower* +```yaml +ingress: #disables creation of both Nginx controller deployment and Ingress objects + enabled: false +``` + +*v1.2.2 or higher* +```yaml +ingress-nginx: #disables creation of Nginx controller deployment + enabled: false + +ingress: #disables creation of Ingress objects (assuming you've manually created ingress resource before) + enabled: false +``` + +* Replace `annotations` that have been deprecated with `ingressClassName` + +*v1.1.1 or lower* +```yaml +ingress: + annotations: + kubernetes.io/ingress.class: my-non-codefresh-nginx +``` + +*v1.2.2 or higher* +```yaml +ingress-nginx: + enabled: false + +ingress: + ingressClassName: my-non-codefresh-nginx +### `kubernetes.io/ingress.class` annotation is deprecated from kubernetes v1.22+. +# annotations: +# kubernetes.io/ingress.class: my-non-codefresh-nginx +``` + +#### Update configuration for Codefresh-managed ingress with custom values + +If you were running `Codefresh-managed ingress` controller with _custom_ values refer to [values.yaml](https://github.com/kubernetes/ingress-nginx/blob/main/charts/ingress-nginx/values.yaml) from the official repo. If needed, update the `ingress-nginx` section in `config.yaml`. The example below shows the default values (already provided in Codefresh chart) for `ingress-nginx`: + +```yaml +ingress-nginx: + enabled: true + controller: + ## This section refers to the creation of the IngressClass resource + ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19 + ingressClassResource: + # -- Is this ingressClass enabled or not + enabled: true + # -- Is this the default ingressClass for the cluster + default: false + # -- Controller-value of the controller that is processing this ingressClass + controllerValue: "k8s.io/ingress-nginx-codefresh" + # -- Name of the ingressClass + name: nginx-codefresh + # -- For backwards compatibility with ingress.class annotation. + # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation + ingressClass: nginx-codefresh + # -- Process IngressClass per name (additionally as per spec.controller). 
+ ingressClassByName: true + # Limit the scope of the controller to a specific namespace + scope: + # -- Enable 'scope' or not + enabled: true + admissionWebhooks: + enabled: false +``` +> New `ingress-nginx` subchart creates a new `cf-ingress-nginx-controller` service (`type: LoadBalancer`) instead of old `cf-ingress-controller` service. So make sure to update DNS record for `global.appUrl` to point to a new external load balancer IP. + You can get external load balancer IP with: + `kubectl get svc cf-ingress-nginx-controller -o jsonpath={.status.loadBalancer.ingress[0].ip` + + +### Update configuration for RabbitMQ chart +From version **1.2.2 and higher**, we have deprecated support for the `Codefresh-managed Rabbitmq` chart. Bitnami public `bitnami/rabbitmq` chart has replaced the `Codefresh-managed rabbitmq`. For more information, see [bitnami/rabbitmq](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq). + +> Configuration updates are not required if you are running an **external** RabbitMQ service. + +> RabbitMQ chart was replaced so as a consequence values structure might be different for some parameters. + For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/rabbitmq/values.yaml) + +**`existingPvc` changed to `existingClaim` and defined under `persistence`** + +*v1.1.1 or lower* +```yaml +rabbitmq: + existingPvc: my-rabbitmq-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +*v1.2.2 or higher* +```yaml +rabbitmq: + volumePermissions: ## Enable init container that changes the owner and group of the persistent volume from existing claim + enabled: true + persistence: + existingClaim: my-rabbitmq-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 500m + memory: 1Gi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +**`storageClass` and `size` defined under `persistence`** + +*v1.1.1 or lower* +```yaml +rabbitmq: + storageClass: my-storage-class + storageSize: 32Gi +``` + +*v1.2.2 or higher* +```yaml +rabbitmq: + persistence: + storageClass: my-storage-class + size: 32Gi +``` + +### Update configuration for Redis chart +From version **1.2.2 and higher**, we have deprecated support for the `Codefresh-managed Redis` chart. Bitnami public `bitnami/redis` chart has replaced the `Codefresh-managed Redis` chart. For more information, see [bitnami/redis](https://github.com/bitnami/charts/tree/master/bitnami/redis). + +Redis storage contains **CRON and Registry** typed triggers so you must migrate existing data from the old deployment to the new stateful set. +This is done by backing up the existing data before upgrade, and then restoring the backed up data after upgrade. + +> Configuration updates are not required: + * When running an **external** Redis service. + * If CRON and Registy triggers have not been configured. + +#### Verify existing Redis data for CRON and Registry triggers +Check if you have CRON and Registry triggers configured in Redis. + +* Run `codefresh get triggers` + OR + Directly from the K8s cluster where Codefresh is installed. 
+ +```shell +NAMESPACE=codefresh +REDIS_PASSWORD=$(kubectl get secret --namespace $NAMESPACE cf-redis -o jsonpath="{.data.redis-password}" | base64 --decode) + +kubectl exec -it deploy/cf-redis -- env REDIS_PASSWORD=$REDIS_PASSWORD bash +#once inside cf-redis pod +REDISCLI_AUTH="$REDIS_PASSWORD" redis-cli +info keyspace # list db +select 15 # select db 15 +keys * #show keys +``` + +* If there are results, continue with _Back up existing Redis data_. + +#### Back up existing Redis data +Back up the existing data before the upgrade: + +* Connect to the pod, run `redis-cli`, export AOF data from old `cf-redis-*` pod: + +```shell +NAMESPACE=codefresh +REDIS_PASSWORD=$(kubectl get secret --namespace $NAMESPACE cf-redis -o jsonpath="{.data.redis-password}" | base64 --decode) +REDIS_POD=$(kubectl get pods -l app=cf-redis -o custom-columns=:metadata.name --no-headers=true) +kubectl cp $REDIS_POD:/bitnami/redis/data/appendonly.aof appendonly.aof -c cf-redis +``` + +#### Restore backed-up Redis data +Restore the data after the upgrade: + +* Copy `appendonly.aof` to the new `cf-redis-master-0` pod: + + ```shell + kubectl cp appendonly.aof cf-redis-master-0:/data/appendonly.aof + ```` +* Restart `cf-redis-master-0` and `cf-api` pods: + + ```shell + kubectl delete pod cf-redis-master-0 + + kubectl scale deployment cf-cfapi-base --replicas=0 -n codefresh + kubectl scale deployment cf-cfapi-base --replicas=2 -n codefresh + ``` + +> Redis chart was replaced so as a consequence values structure might be different for some parameters. + For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/redis/values.yaml). + +**`existingPvc` changed to `existingClaim` and defined under `master.persistence`** + +*v1.1.1 or lower* +```yaml +redis: + existingPvc: my-redis-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +*v1.2.2 or higher* +```yaml +redis: + volumePermissions: ## Enable init container that changes the owner and group of the persistent volume from existing claim + enabled: true + master: + persistence: + existingClaim: my-redis-pvc + nodeSelector: + foo: bar + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 500m + memory: 500Mi + tolerations: + - effect: NoSchedule + key: + operator: Equal + value: +``` + +**`storageClass` and `size` defined under `master.persistence`** + + +*v1.1.1 or lower* +```yaml +redis: + storageClass: my-storage-class + storageSize: 32Gi +``` + +*v1.2.2 or higher* +```yaml +redis: + master: + persistence: + storageClass: my-storage-class + size: 32Gi +``` + +> If you run the upgrade without redis backup and restore procedure, **Helm Releases Dashboard** page might be empty for a few minutes after the upgrade. + +## Upgrade to 1.3.0 and higher +This major release **deprecates** the following Codefresh managed charts: +* Consul +* Nats + +### Update configuration for Consul +From version **1.3.0 and higher**, we have deprecated the Codefresh-managed `consul` chart, in favor of Bitnami public `bitnami/consul` chart. For more information, see [bitnami/consul](https://github.com/bitnami/charts/tree/master/bitnami/consul). + +Consul storage contains data about **Windows** worker nodes, so if you had any Windows nodes connected to your OnPrem installation, see the following instruction: + +> Use `https:///admin/nodes` to check for any existing Windows nodes. 
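Alternatively, you can check from inside the cluster. A rough sketch, assuming the `consul` CLI is available in the `cf-consul-0` pod (the same binary used for the snapshot commands below), to list the nodes Consul knows about:

```shell
# List the nodes currently registered in Consul (Windows workers, if any, appear here)
kubectl exec -it cf-consul-0 -n codefresh -- consul members
```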
+ +#### Back up existing consul data +_Before starting the upgrade_, back up existing data. + +> Because `cf-consul` is a StatefulSet and has some immutable fields in its spec with both old and new charts having the same names, you cannot perform a direct upgrade. + Direct upgrade will most likely fail with: + `helm.go:84: [debug] cannot patch "cf-consul" with kind StatefulSet: StatefulSet.apps "cf-consul" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', 'updateStrategy' and 'minReadySeconds' are forbidden` + After backing up existing data, you must delete the old StatefulSet. + + +1. Exec into the consul pod and create a snapshot: +```shell +kubectl exec -it cf-consul-0 -n codefresh -- consul snapshot save backup.snap +``` +1. Copy snapshot locally: +```shell +kubectl cp -n codefresh cf-consul-0:backup.snap backup.snap +``` +1. **Delete the old** `cf-consul` stateful set: + +```shell +kubectl delete statefulset cf-consul -n codefresh +``` + +#### Restore backed up data + +After completing the upgrade to the current version, restore the `consul` data that you backed up. + +1. Copy the snapshot back to the new pod: + +```shell +kubectl cp -n codefresh backup.snap cf-consul-0:/tmp/backup.snap +``` +1. Restore the data: +``` +kubectl exec -it cf-consul-0 -n codefresh -- consul snapshot restore /tmp/backup.snap +``` +> Consul chart was replaced, and values structure might be different for some parameters. + For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/consul/values.yaml) + + +### Update Nats configuration +From version **1.3.0 and higher**, we have deprecated Codefresh-managed `nats` chart in favor of Bitnami public `bitnami/nats` chart. For more information, see [bitnami/nats](https://github.com/bitnami/charts/tree/master/bitnami/consul). + +> Because `cf-nats` is a StatefulSet and has some immutable fields in its spec, both the old and new charts have the same names, preventing a direct upgrade. + Direct upgrade will most likely fail with: + `helm.go:84: [debug] cannot patch "cf-nats" with kind StatefulSet: StatefulSet.apps "cf-nats" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', 'updateStrategy' and 'minReadySeconds' are forbidden` + After backing up existing data, you must delete the old StatefulSet. + + +* **Delete the old** `cf-nats` stateful set. + +```shell +kubectl delete statefulset cf-nats -n codefresh +``` + +> Nats chart was replaced, and values structure might be different for some parameters. + For the complete list of values, see [values.yaml](https://github.com/bitnami/charts/blob/master/bitnami/nats/values.yaml). + +### Upgrade to 1.3.1 and higher + +Chart **v1.3.1** fixes duplicated env vars `CLUSTER_PROVIDERS_URI` and `CLUSTER_PROVIDERS_PORT` in `cf-api` deployment. +```yaml +W1010 03:03:55.553842 280 warnings.go:70] spec.template.spec.containers[0].env[94].name: duplicate name "CLUSTER_PROVIDERS_URI" +W1010 03:03:55.553858 280 warnings.go:70] spec.template.spec.containers[0].env[95].name: duplicate name "CLUSTER_PROVIDERS_PORT" +``` + + +> Due to Helm issue [Removal of duplicate array entry removes completely from Kubernetes](https://github.com/helm/helm/issues/10741), you shoud run `kcfi deploy` or `helm upgrade` two times consecutively. 
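For example, with `kcfi` (using the same `config.yaml` you normally deploy with):

```shell
# Run the upgrade twice in a row so Helm fully removes the duplicated env vars
kcfi deploy -c config.yaml
kcfi deploy -c config.yaml
```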
+ + +With chart **v1.3.1** [insecure registy](https://docs.docker.com/registry/insecure/) property has been moved under `builder` section: + +```yaml +builder: + insecureRegistries: + - "myregistrydomain.com:5000" +``` + +## Upgrade the Codefresh Platform with [kcfi](https://github.com/codefresh-io/kcfi) + +1. Locate the `config.yaml` file you used in the initial installation. +1. Change the release number inside it. + ```yaml + metadata: + kind: codefresh + installer: + type: helm + helm: + chart: codefresh + repoUrl: https://chartmuseum.codefresh.io/codefresh + version: 1.2.14 + ``` +1. Perform a dry run and verify that there are no errors: + `kcfi deploy --dry-run --debug -c codefresh/config.yaml` +1. Run the actual upgrade: + `kcfi deploy --debug -c codefresh/config.yaml` +1. Verify that all the pods are are in running state: + `kubectl -n codefresh get pods --watch` +1. Log in to the Codefresh UI, and check the new version. +1. If needed, enable/disable new feature flags. + +## Codefresh with Private Registry + +If you install/upgrade Codefresh on the air-gapped environment (without access to public registries or Codefresh Enterprise registry) you will have to copy the images to your organization container registry. + +**Obtain [image list](https://github.com/codefresh-io/onprem-images/tree/master/releases) for specific release** + +**Push images to private docker registry** + +There are 3 types of images: + +> localhost:5000 is your + +- non-Codefresh like: +``` +bitnami/mongo:4.2 +k8s.gcr.io/ingress-nginx/controller:v1.2.0 +postgres:13 +``` +convert to: +``` +localhost:5000/bitnami/mongodb:4.2 +localhost:5000/ingress-nginx/controller:v1.2.0 +localhost:5000/postgres:13 +``` +- Codefresh public images like: +``` +quay.io/codefresh/dind:20.10.13-1.25.2 +quay.io/codefresh/engine:1.147.8 +quay.io/codefresh/cf-docker-builder:1.1.14 +``` +convert to: +``` +localhost:5000/codefresh/dind:20.10.13-1.25.2 +localhost:5000/codefresh/engine:1.147.8 +localhost:5000/codefresh/cf-docker-builder:1.1.14 +``` +- Codefresh private images like: +``` +gcr.io/codefresh-enterprise/codefresh/cf-api:21.153.6 +gcr.io/codefresh-enterprise/codefresh/cf-ui:14.69.38 +gcr.io/codefresh-enterprise/codefresh/pipeline-manager:3.121.7 +``` +convert to: +``` +localhost:5000/codefresh/cf-api:21.153.6 +localhost:5000/codefresh/cf-ui:14.69.38 +localhost:5000/codefresh/pipeline-manager:3.121.7 +``` +> DELIMITERS are codefresh OR codefresh-io + +- To push images via [kcfi](https://github.com/codefresh-io/kcfi) (ver. **0.5.15** is required) use: + +`kcfi images push --help` + +> Prerequisites: sa.json to access Codefresh Enterprise GCR + +`kcfi images push --codefresh-registry-secret sa.json --images-list images-list-v1.2.14 --registry localhost:5000 --user "root" --password "root"` + +- To push images via [push-to-registry.sh](https://github.com/codefresh-io/onprem-images/blob/master/push-to-registry.sh) script use (see [prerequisites](https://github.com/codefresh-io/onprem-images#prerequesites)): + +`./push-to-registry.sh localhost:5000 v1.2.14` + +#### Install/Upgrade Codefresh with private docker registry config** + +Set `usePrivateRegistry: true`, and set privateRegistry address, username and password in `config.yaml`. 
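Before running the install/upgrade, you may want to confirm that a mirrored image is reachable from your private registry. A quick sanity check (the registry hostname and image tag below are illustrative):

```shell
# Pull one of the previously pushed images from your private registry
docker pull myregistry.domain.com/codefresh/cf-api:21.153.6
```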
+ +For Bitnami helm charts ([consul](https://github.com/bitnami/charts/blob/main/bitnami/consul/values.yaml), [nats](https://github.com/bitnami/charts/blob/main/bitnami/nats/values.yaml), [redis](https://github.com/bitnami/charts/blob/main/bitnami/redis/values.yaml), [rabbitmq](https://github.com/bitnami/charts/blob/main/bitnami/rabbimq/values.yaml)) define `global.imageRegistry`. + +For [ingress-nginx](https://github.com/kubernetes/ingress-nginx/blob/main/charts/ingress-nginx/values.yaml) chart define `ingress-nginx.controller.image.registry`. + + +`config.yaml` + +```yaml +global: + imageRegistry: myregistry.domain.com + +ingress-nginx: + controller: + image: + registry: myregistry.domain.com + +images: + codefreshRegistrySa: sa.json + usePrivateRegistry: true + privateRegistry: + address: myregistry.domain.com + username: + password: +``` + \ No newline at end of file diff --git a/_docs/installation/codefresh-on-prem.md b/_docs/installation/codefresh-on-prem.md new file mode 100644 index 00000000..f314b7f0 --- /dev/null +++ b/_docs/installation/codefresh-on-prem.md @@ -0,0 +1,1237 @@ +--- +title: "Codefresh On-Prem Installation & Configuration" +description: "Use the Kubernetes Codefresh Installer to install the Codefresh On-Premises platform " +group: installation +redirect_from: + - /docs/enterprise/codefresh-on-prem/ +toc: true +--- + + +This article will guide you through the installation of the Codefresh platform on your on-prem environment. This article covers all aspects of installation and configuration. Please read the article carefully before installing Codefresh. + +[kcfi](https://github.com/codefresh-io/kcfi) (the Kubernetes Codefresh Installer) is a one-stop-shop for this purpose. Even though Codefresh offers multiple tools to install components, `kcfi` aggregates all of them into a single tool. + +## Survey: What Codefresh needs to know + +Fill out this survey before the installation to make sure your on-prem environment is ready for deployment: + +[Survey](https://docs.google.com/forms/d/e/1FAIpQLSf18sfG4bEQuwMT7p11F6q70JzWgHEgoAfSFlQuTnno5Rw3GQ/viewform) + +## On-prem system requirements + +{: .table .table-bordered .table-hover} +| Item | Requirement | +| -------------- | -------------- | +|Kubernetes cluster | Server versions v1.19 through v1.22. {::nomarkdown}
      Note: Maintenance support for Kubernetes v1.19 ended on Oct 28, 2021.{:/}| +|Operating systems|{::nomarkdown}
      • Windows 10/7
      • Linux
      • OSX
        {:/}| +|Node requirements| {::nomarkdown}
        • Memory: 5000 MB
        • CPU: 2
        {:/}| +|Git providers |{::nomarkdown}
        • GitHub: SaaS and on-premises versions
        • Bitbucket: SaaS and Bitbucket Server (on-premises) version 5.4.0 and above
        • GitLab: SaaS and on-premises versions (API v4 only)
        {:/}| +|Node size | {::nomarkdown}
        • Single node: 8 CPU cores and 16GB RAM
        • Multi node: master(s) + 3 nodes with 4 CPU cores and 8GB RAM each (24 GB in total)
        {:/}| + + + +## Prerequisites + +### Service Account file +The GCR Service Account JSON file, `sa.json` is provided by Codefresh. Contact support to get the file before installation. + +### Default app credentials +Also provided by Codefresh. Contact support to get them file before installation. + +### TLS certificates +For a secured installation, we highly recommend using TLS certificates. Make sure your `ssl.cert` and `private.key` are valid. + +> Use a Corporate Signed certificate, or any valid TLS certificate, for example, from lets-encrypt. + +### Interent connections +We require outbound internet connections for these services: +* GCR to pull platform images +* Dockerhub to pull pipeline images + + +## Security Constraints + +Codefresh has some security assumptions about the Kubernetes cluster it is installed on. + +### RBAC for Codefresh + +The Codefresh installer should be run with a Kubernetes RBAC role that allows object creation in a single namespace. If, by corporate policy, you do not allow the creation of service accounts or roles, a Kubernetes administrator will need to create the role, service account, and binding as shown below. + +>Users with the `codefresh-app` role cannot create other roles or role bindings. + +`codefresh-app-service-account.yaml` +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: codefresh-app + namespace: codefresh +``` + +`codefresh-app-role.yaml` +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: codefresh-app + namespace: codefresh +rules: +- apiGroups: + - "" + - apps + - codefresh.io + - autoscaling + - extensions + - batch + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.k8s.io + - route.openshift.io + - policy + resources: + - routes + - ingresses + - poddisruptionbudgets + verbs: + - '*' +``` + +`codefresh-app-roleBinding.yaml` +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: codefresh + name: codefresh-app-binding + namespace: codefresh +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: codefresh-app +subjects: +- kind: ServiceAccount + name: codefresh-app +``` + +To apply these changes, run: + +``` +kubectl apply -f [file] +``` + +### Operator CRD + +If, due to security rules you are not allowed to create a CRD for a client running `kcfi`, have an Administrator create the RBAC (as instructed above) and the CRD as follows: + +`codefresh-crd.yaml` +```yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: codefreshes.codefresh.io + labels: + app: cf-onprem-operator +spec: + group: codefresh.io + names: + kind: Codefresh + listKind: CodefreshList + plural: codefreshes + singular: codefresh + scope: Namespaced + subresources: + status: {} + versions: + - name: v1alpha1 + served: true + storage: true +``` + +To apply these changes, run: +``` +kubectl apply -f codefresh-crd.yaml +``` + +You will also need to modify the `config.yaml` for `kcfi` by setting `skipCRD: true` and `serviceAccountName: codefresh-app`: + +`config.yaml` +```yaml + operator: + #dockerRegistry: gcr.io/codefresh-enterprise + #image: codefresh/cf-onprem-operator + #imageTag: + serviceAccountName: codefresh-app + skipCRD: true +``` + +## Install the Codefresh Platform + +### Before you begin + +### Step1 : Download and extract `kcfi` +Download the binary for `kcfi`. It is a single binary without dependencies. + +1. 
Download the binary from [GitHub](https://github.com/codefresh-io/kcfi/releases){:target="\_blank"}. + >Note: Darwin is for OSX +1. Extract the downloaded file. +1. Copy the file to your $PATH: `cp /path/to/kcfi /usr/local/bin` + +### Step 2: Set the current context +* Make sure you have a `kubeconfig` file with the correct context, as in this example: + +``` +kubectl config get-contexts # display list of contexts +kubectl config use-context my-cluster-name # set the default context to my-cluster-name +kubectl config current-context # verify the current-context` +``` +### Step 3: Initialize and configure `config.yaml` +Prepare the platform for installation by initializing the directory with `config.yaml`. Then edit `config.yaml` and configure all installation settings, including files and directories required, and then deploy to Kubernetes. + +The `config.yaml` is includes descriptions for every parameter. + +1. Create the directory with the `config.yaml`: + +``` +kcfi init codefresh [-d /path/to/stage-dir] +``` +1. Below `installer`, define your installation method as either Helm or Codefresh CRD: + +```yaml + installer: + # type: + # "operator" - apply codefresh crd definition + # "helm" - install/upgrade helm chart from client +``` +1. If you are installing Codefresh in an air-gapped environment (without access to public Docker Hub or codefresh-enterprise registry), copy the images to your organization container registry (Kubernetes will pull the images from it as part of the installation). + + 1. Set `usePrivateRegistry` to `true`. + 1. Define `privateRegistry` `address`, `username` and `password`. + + +```yaml +images: + codefreshRegistrySa: sa.json + # usePrivateRegistry: false + # privateRegistry: + # address: + # username: + # password: + lists: + - images/images-list +``` +1. Push all or a single image: + * All images: + ``` + kcfi images push [-c|--config /path/to/config.yaml] + ``` + * Single image: + ``` + kcfi images push [-c|--config /path/to/config.yaml] [options] repo/image:tag [repo/image:tag] + ``` + + > To get the full list of options, run `kcfi images --help`. + + >Even if you are running a Kubernetes cluster with outgoing access to the public internet, note that Codefresh platform images are not public and can be obtained by using `sa.json` file provided by Codefresh support personnel. + Use the flag `--codefresh-registry-secret` to pass the path to the file `sa.json`. + +### Step 4: (Optional) Configure TLS certificates +If you are using TLS, enable it in `config.yaml`. + +1. Set `tls.selfSigned =false`. +1. Place both `ssl.crt` and `private.key` into certs/ directory. + +### Step 5: Deploy On-premises platform + +1. Run: + +``` +kcfi deploy [ -c config.yaml ] [ --kube-context ] [ --atomic ] [ --debug ] [ helm upgrade parameters ] +``` +### Step 6: Install the Codefresh Kubernetes Agent + +Install the `cf-k8s-agent` on a cluster separate from the installer, or in a different namespace on the same cluster. +The `cf-k8s-agent` accesses Kubernetes resources (pods, deployments, services, etc.) behind the firewall to display them in the Codefresh UI. The agent streams updates from cluster resources and then sends information updates to the `k8s-monitor` service. + +1. Create a staging directory for the agent: + +``` +kcfi init k8s-agent +``` + A staging directory is created, named k8s-agent with a `config.yaml`. +1. Edit k8s-agent/config.yaml ?? for what?? + +1. 
Run: + +``` +kcfi deploy [ -c config.yaml ] [-n namespace] +``` + where: + [namespace] is the namespace if you are installing the agent in the same cluster. + + + + +## High-Availability (HA) with active-passive clusters +Enable high-availability in the Codefresh platform for disaster recovery with an active-passive cluster configuration. +Review the prerequisites, and then do the following to configure high-availability: +* For new installations, install Codefresh on the active cluster +* Install Codefresh on the passive cluster +* When needed, switch between clusters for disaster recovery + +### Prerequisites + +* **K8s clusters** + Two K8s clusters, one designated as the active cluster, and the other designated as the passive cluster for disaster recovery. + +* **External databases and services** + Databases and services external to the clusters. + + * Postgres database (see [Configuring an external Postgres database](#configuring-an-external-postgres-database)) + * MongoDB (see [Configuring an external MongoDB](#configuring-an-external-mongodb)) + * Redis service (see [Configuring an external Redis service](#configure-an-external-redis-service)) + * RabbitMQ service (see [Configuring an external RabbitMQ service](#configure-an-external-redis-service)) + * Consul service (see [Configuring an external Consul service](#configuring-an-external-consul-service)) + +* **DNS record** + To switch between clusters for disaster recovery + +### Install Codefresh on active cluster + +If you are installing Codefresh for the first time, install Codefresh on the cluster designated as the _active_ cluster. +See [Installing the Codefresh platform]({{site.baseurl}}/docs/administration/codefresh-on-prem/#install-the-codefresh-platform). + +### Install Codefresh on passive cluster + +First get the `values.yaml` file from the current Codefresh installation on the active cluster. Then install Codefresh on the passive cluster using Helm. + +**1. Get values.yaml** +1. Switch your kube context to the active cluster. +1. Get `values.yaml` from the active cluster: + `helm get values ${release_name} -n ${namespace} > cf-passive-values.yaml` + where: + `{release-version}` is the name of the Codefresh release, and is by default `cf`. + `${namespace}` is the namespace with the Codefresh release, and is by default `codefresh`. + +{:start="3"} +1. Update the required variables in `cf-passive-values.yaml`. + > If the variables do not exist, add them to the file. + + * In the `global` section, disable `seedJobs` by setting it to `false`: + + ```yaml + global: + seedJobs: false + ``` + + * Add variable `FREEZE_WORKFLOWS_EXECUTION` to `cfapi`, and set it to `true`. + + ```yaml + cfapi: + env: + FREEZE_WORKFLOWS_EXECUTION: true + ``` + +**2. Install Codefresh on passive cluster** + +1. Download the Helm chart: + `helm repo add codefresh-onprem https://chartmuseum.codefresh.io/codefresh` + `helm fetch codefresh-onprem/codefresh --version ${release-version}` + where: + `{release-version}` is the version of Codefresh you are downloading. + +1. Unzip the Helm chart: + `tar -xzf codefresh-${release-version}.tgz` +1. Go to the folder where you unzipped the Helm chart. +1. Install Codefresh with the Helm command using `cf-passive-values.yaml`: + `helm install cf . -f ${path}/cf-passive-values.yaml -n codefresh` + + +### Switch between clusters for disaster recovery + +For disaster recovery, switch between the active and passive clusters. + +1. 
In the `cfapi` deployment on the _active_ cluster, change the value of `FREEZE_WORKFLOWS_EXECUTION` from `false` to `true`. + If the variable does not exist, add it, and make sure the value is set to `true`. +1. In the `cfapi` deployment on the _passive_ cluster, change the value of `FREEZE_WORKFLOWS_EXECUTION` from `true` to `false`. +1. Switch DNS from the currently active cluster to the passive cluster. + +### Services without HA + +The following services cannot run in HA, but are not critical in case of downtime or during the process of switchover from active to passive. +These services are not considered critical as they are part of build-handling. In case of failure, a build retry occurs, ensuring that the build is always handled. +* `cronus` +* `cf-sign` + + +## Additional configuration + +After you install Codefresh, these are post-installation operations that you should follow. + +### Selectively enable SSO provider for account +As a Codefresh administrator, you can select the providers you want to enable for SSO in your organization, for both new and existing accounts. +You can always renable a provider when needed. + + +1. Sign in as Codefresh admin. +1. From the left pane, select **Providers**. +1. Disable the providers not relevant for the accounts. +These providers are not displayed as options during sign-up/sign-in. + + +### (Optional) Set up Git integration + +Codefresh supports out-of-the-box Git logins using your local username and password, or logins using your Git provider, as described below.You can also configure login to supported SSO providers after installation, as described in [Setting up OpenID Connect (OIDC) Federated Single Sign-On (SSO)]({{site.baseurl}}/docs/administration/single-sign-on/oidc). + +If you’d like to set up a login to Codefresh using your Git provider, first login using the default credentials (username: `AdminCF`, password: `AdminCF` and add your Git provider OAuth integration details in our admin console: + +**Admin Management** > **IDPs** tab + +To get the Client ID and Client Secret for each of the supported Git providers, follow the instructions according to your VCS provider. + +#### GitHub Enterprise + +Navigate to your GitHub organization settings: https://github.com/organizations/your_org_name/settings. + +On the left-hand side, under **Developer settings**, select **OAuth Apps**, and click **Register an Application**. + +Complete the OAuth application registration as follows: + +- **Application name:** codefresh-on-prem (or a significant name) +- **Homepage URL:** https://your-codefresh-onprem-domain +- **Authorization callback URL:** https://your-codefresh-onprem-domain/api/auth/github/callback + +After registration, note down the created Client ID and Client Secret. They will be required for the settings in **Codefresh Admin**->**IDPs** + +#### GitLab + +Navigate to your Applications menu in GitLab User Settings: https://gitlab.com/profile/applications + +Complete the application creation form as follows: + +- **Name:** codefresh-onprem (or a significant name) +- **Redirect URI:** https://your-codefresh-onprem-domain/api/auth/gitlab/callback +- **Scopes (permissions):** + - API + - read_user + - read_registry + +Click **Save application**. + +After app creation, note down the created Application ID and Client Secret. They will be required for the settings in **Codefresh Admin**->**IDPs**. 
+ +{% include image.html + lightbox="true" + file="/images/installation/git-idp.png" + url="/images/installation/git-idp.png" + %} + +>Note: When configuring the default IDP (for GitHub, GitLab, etc), do not modify the Client Name field. Please keep them as GitHub, GitLab, BitBucket, etc. Otherwise, the signup and login views won’t work. + +### Proxy Configuration + +If your environment resides behind HTTP proxies, you need to uncomment the following section in `config.yaml`: + +```yaml +global: + env: + HTTP_PROXY: "http://myproxy.domain.com:8080" + http_proxy: "http://myproxy.domain.com:8080" + HTTPS_PROXY: "http://myproxy.domain.com:8080" + https_proxy: "http://myproxy.domain.com:8080" + NO_PROXY: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-nginx-controller,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis-master,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" + no_proxy: "127.0.0.1,localhost,kubernetes.default.svc,.codefresh.svc,100.64.0.1,169.254.169.254,cf-builder,cf-cfapi,cf-cfui,cf-chartmuseum,cf-charts-manager,cf-cluster-providers,cf-consul,cf-consul-ui,cf-context-manager,cf-cronus,cf-helm-repo-manager,cf-hermes,cf-ingress-nginx-controller,cf-kube-integration,cf-mongodb,cf-nats,cf-nomios,cf-pipeline-manager,cf-postgresql,cf-rabbitmq,cf-redis-master,cf-registry,cf-runner,cf-runtime-environment-manager,cf-store" +``` +In addition to this, you should also add your Kubernetes API IP address (`kubectl get svc kubernetes`) to both: `NO_PROXY` and `no_proxy`. + +### Storage + +Codefresh is using both cluster storage (volumes) as well as external storage. + +#### Databases + +The following table displays the list of databases created as part of the installation: + +| Database | Purpose | Latest supported version | +|----------|---------| ---------------| +| mongoDB | storing all account data (account settings, users, projects, pipelines, builds etc.) | 4.2.x | +| postgresql | storing data about events that happened on the account (pipeline updates, deletes, etc.). The audit log uses the data from this database. | 13.x | +| redis | mainly used for caching, but also used as a key-value store for our trigger manager. 
| 6.0.x | + +#### Volumes + +These are the volumes required for Codefresh on-premises: + + +{: .table .table-bordered .table-hover} +| Name | Purpose | Minimum Capacity | Can run on netfs (nfs, cifs) | +|----------------|------------------------|------------------|------------------------------| +| cf-mongodb* | Main database - Mongo | 8GB | Yes** | +| cf-postgresql* | Events databases - Postgres | 8GB | Yes** | +| cf-rabbitmq* | Message broker | 8GB | No** | +| cf-redis* | Cache | 8GB | No** | +| cf-store | Trigger Redis data | 8GB | No** | +| cf-cronus | Trigger crontab data | 1GB | Yes | +| datadir-cf-consul-0 | Consul datadir | 1GB | Yes | +| cf-chartmuseum | chartmuseum | 10GB | Yes | +| cf-builder-0 | /var/lib/docker for builder | 100GB | No*** | +| cf-runner-0 | /var/lib/docker for composition runner | 100GB | No*** | + +{% raw %} + + (*) Possibility to use external service + + (**) Running on netfs (nfs, cifs) is not recommended by product admin guide + + (***) Docker daemon can be run on block device only + +{% endraw %} + +StatefulSets (`cf-builder` and `cf-runner`) process their data on separate physical volumes (PVs) and can be claimed using Persistent Volume Claims (PVCs) with default initial sizes of 100Gi. Also, those StatefulSets have the ability to connect to existing pre-defined PVCs. + +The default initial volume size (100 Gi) can be overridden in the custom `config.yaml` file. Values descriptions are in the `config.yaml` file. +The registry’s initial volume size is 100Gi. It also can be overridden in a custom `config.yaml` file. There is a possibility to use a customer-defined registry configuration file (`config.yaml`) that allows using different registry storage back-ends (S3, Azure Blob, GCS, etc.) and other parameters. More details can be found in the [Docker documentation](https://docs.docker.com/registry/configuration/). + +Depending on the customer’s Kubernetes version we can assist with PV resizing. Details are can be found in this [Kubernetes blog post](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/). + +#### Automatic Volume Provisioning + +Codefresh installation supports automatic storage provisioning based on the standard Kubernetes dynamic provisioner Storage Classes and Persistent Volume Claims. All required installation volumes will be provisioned automatically using the default Storage Class or custom Storage Class that can be specified as a parameter in `config.yaml` under `storageClass: my-storage-class`. + + + +### Retention policy for Codefresh builds +Define a retention policy to manage Codefresh builds. The retention settings are controlled through `cf-api` deployment environment variables, all of which have default settings which you can retain or customize. By default, Codefresh deletes builds older than six months, including offline logs. + +The retention mechanism, implemented as a Cron Job, removes data from collections such as: +* workflowproccesses +* workflowrequests +* workflowrevisions + +{: .table .table-bordered .table-hover} +| Env Variable | Description | Default | +|---------------|--------------------------- |---------------------- | +|`RETENTION_POLICY_IS_ENABLED` | Determines if automatic build deletion through the Cron job is enabled. | `true` | +|`RETENTION_POLICY_BUILDS_TO_DELETE`| The maximum number of builds to delete by a single Cron job. To avoid database issues, especially when there are large numbers of old builds, we recommend deleting them in small chunks. 
You can gradually increase the number after verifying that performance is not affected. | `50` | +|`RETENTION_POLICY_DAYS` | The number of days for which to retain builds. Builds older than the defined retention period are deleted. | `180` | +|`RUNTIME_MONGO_URI` | Optional. The URI of the Mongo database from which to remove MongoDB logs (in addition to the builds). | | + + +### Managing Codefresh backups + +Codefresh on-premises backups can be automated by installing a specific service as an addon to your Codefresh on-premises installation. It is based on the [mgob](https://github.com/stefanprodan/mgob){:target="\_blank"} open source project, and can run scheduled backups with retention, S3 & SFTP upload, notifications, instrumentation with Prometheus and more. + +#### Configure and deploy the Backup Manager + +Backup Manager is installed as an addon and therefore it needs an existing Codefresh on-premises installation. +Before installing it, please make sure you have selected a proper kube config pointing to the cluster, where you have Codefresh installed on. + +1. Go to the staging directory of your Codefresh installation, and open the config file: `your-CF-stage-dir/addons/backup-manager/config.yaml`. +1. Retain or customize the values of these configuration parameters: + * `metadada`: Various CF-installer-specific parameters, which should not be changed in this case + * `kubernetes`: Specify a kube context, kube config file, and a namespace for the backup manager + * `storage`: Storage class, storage size and read modes for persistent volumes to store backups locally within your cluster + * Backup plan configuration parameters under `jobConfigs.cfBackupPlan`: + * `target.uri` - target mongo URI. It is recommended to leave the mongo uri value blank - it will be taken automatically from the Codefresh release installed in your cluster + * `scheduler` - here you can specify cron expression for your backups schedule, backups retention and timeout values + +For more advanced backup plan settings, such as specifying various remote cloud-based storage providers for your backups, configuring notifications and other, please refer to [this](https://github.com/stefanprodan/mgob#configure) page + +To **deploy the backup manager** service, please select a correct kube context, where you have Codefresh on-premises installed and deploy backup-manager with the following command: + +``` +kcfi deploy -c `your-CF-stage-dir/addons/backup-manager/config.yaml` +``` + +#### On-demand/ad-hoc backup +``` +kubectl port-forward cf-backup-manager-0 8090 +curl -X POST http://localhost:8090/backup/cfBackupPlan +``` + +#### Restore from backup +``` +kubectl exec -it cf-backup-manager-0 bash +mongorestore --gzip --archive=/storage/cfBackupPlan/backup-archive-name.gz --uri mongodb://root:password@mongodb:27017 --drop +``` + +### Configuring AWS Load Balancers + +By default Codefresh deploys the [ingress-nginx](https://github.com/kubernetes/ingress-nginx/) controller and [Classic Load Balancer](https://docs.aws.amazon.com/eks/latest/userguide/load-balancing.html) as a controller service. + +#### NLB + +To use a **Network Load Balancer** - deploy a regular Codefresh installation with the following ingress config for the the `cf-ingress-controller` controller service. 
+ +`config.yaml` +```yaml +ingress-nginx: + controller: + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: nlb + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '60' + service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled: 'true' + +tls: + selfSigned: false + cert: certs/certificate.crt + key: certs/private.key +``` +This annotation will create a new Load Balancer - Network Load Balancer, which you should use in the Codefresh UI DNS record. +Update the DNS record according to the new service. + +#### L7 ELB with SSL Termination + +When a **Classic Load Balancer** is used, some Codefresh features that (for example `OfflineLogging`), will use a websocket to connect with Codefresh API and they will require secure TCP (SSL) protocol enabled on the Load Balancer listener instead of HTTPS. + +To use either a certificate from a third party issuer that was uploaded to IAM or a certificate [requested](https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) within AWS Certificate Manager see the followning config example: + + +`config.yaml` +```yaml +ingress-nginx: + controller: + service: + annotations: + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp" + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443" + service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600' + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: < CERTIFICATE ARN > + targetPorts: + http: http + https: http + +tls: + selfSigned: true +``` + +- both http and https target port should be set to **80**. +- update your AWS Load Balancer listener for port 443 from HTTPS protocol to SSL. + +#### ALB + +To use the **Application Load Balancer** the [ALB Ingress Controller](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) should be deployed to the cluster. + +To support ALB: + +- First disable Nginx controller in the Codefresh init config file - __config.yaml__: + +```yaml +ingress-nginx: #disables creation of Nginx controller deployment + enabled: false + +ingress: #disables creation of Ingress object + enabled: false +``` + +- [deploy](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) the ALB controller; +- create a new **ingress** resource: + +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/scheme: internet-facing + alb.ingress.kubernetes.io/target-type: ip + kubernetes.io/ingress.class: alb + meta.helm.sh/release-name: cf + meta.helm.sh/release-namespace: codefresh + labels: + app: cf-codefresh + release: cf + name: cf-codefresh-ingress + namespace: codefresh +spec: + defaultBackend: + service: + name: cf-cfui + port: + number: 80 + rules: + - host: myonprem.domain.com + http: + paths: + - backend: + service: + name: cf-cfapi + port: + number: 80 + path: /api/* + pathType: ImplementationSpecific + - backend: + service: + name: cf-cfapi + port: + number: 80 + path: /ws/* + pathType: ImplementationSpecific + - backend: + service: + name: cf-cfui + port: + number: 80 + path: / + pathType: ImplementationSpecific +``` + +### Configure CSP (Content Security Policy) +Add CSP environment variables to `config.yaml`, and define the values to be returned in the CSP HTTP headers. 
+```yaml +cfui: + env: + CONTENT_SECURITY_POLICY: "" + CONTENT_SECURITY_POLICY_REPORT_ONLY: "default-src 'self'; font-src 'self' + https://fonts.gstatic.com; script-src 'self' https://unpkg.com https://js.stripe.com; + style-src 'self' https://fonts.googleapis.com; 'unsafe-eval' 'unsafe-inline'" + CONTENT_SECURITY_POLICY_REPORT_TO: "" +``` +`CONTENT_SECURITY_POLICY` is the string describing content policies. Use semi-colons to separate between policies. +`CONTENT_SECURITY_POLICY_REPORT_TO` is a comma-separated list of JSON objects. Each object must have a name and an array of endpoints that receive the incoming CSP reports. + +For detailed information, see the [Content Security Policy article on MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP). + +### Enable x-hub-signature-256 signature for GitHub AE +Add the `USE_SHA256_GITHUB_SIGNATURE` environment variable to **cfapi** deployment in `config.yaml`. +```yaml +cfapi: + env: + USE_SHA256_GITHUB_SIGNATURE: "true" +``` + +For detailed information, see the [Securing your webhooks](https://docs.github.com/en/developers/webhooks-and-events/webhooks/securing-your-webhooks) and [Webhooks](https://docs.github.com/en/github-ae@latest/rest/webhooks). + + +## Using existing external services for data storage/messaging + +Normally the Codefresh installer, is taking care of all needed dependencies internally by deploying the respective services (mongo, redis etc) on its own. + +You might want however to use your own existing options if you already have those services up and running externally. + +### Configuring an external Postgres database + +It is possible to configure Codefresh to work with your existing Postgres database service, if you don't want to use the default one as provided by the Codefresh installer. + +#### Configuration steps + +All the configuration comes down to putting a set of correct values into your Codefresh configuration file `config.yaml`, which is present in `your/stage-dir/codefresh` directory. During the installation, Codefresh will run a seed job, using the values described in the following steps: + +1. Specify a user name `global.postgresSeedJob.user` and password `global.postgresSeedJob.password` for a seed job. This must be a privileged user allowed to create databases and roles. It will be used only by the seed job to create the needed database and a user. +2. Specify a user name `global.postgresUser` and password `global.postgresPassword` to be used by Codefresh installation. A user with the name and password will be created by the seed job and granted with required privileges to access the created database. +3. Specify a database name `global.postgresDatabase` to be created by the seed job and used by Codefresh installation. +4. Specify `global.postgresHostname` and optionally `global.postgresPort` (`5432` is a default value). +5. Disable the postgres subchart installation with the `postgresql.enabled: false` value, because it is not needed in this case. 
+ + +Below is an example of the relevant piece of `config.yaml`: + +```yaml +global: + postgresSeedJob: + user: postgres + password: zDyGp79XyZEqLq7V + postgresUser: cf_user + postgresPassword: fJTFJMGV7sg5E4Bj + postgresDatabase: codefresh + postgresHostname: my-postgres.ccjog7pqzunf.us-west-2.rds.amazonaws.com + postgresPort: 5432 + +postgresql: + enabled: false #disable default postgresql subchart installation +``` +#### Running the seed job manually + +If you prefer running the seed job manually, you can do it by using a script present in `your/stage-dir/codefresh/addons/seed-scripts` directory named `postgres-seed.sh`. The script takes the following set of variables that you need to have set before running it: + +```shell +export POSTGRES_SEED_USER="postgres" +export POSTGRES_SEED_PASSWORD="zDyGp79XyZEqLq7V" +export POSTGRES_USER="cf_user" +export POSTGRES_PASSWORD="fJTFJMGV7sg5E4Bj" +export POSTGRES_DATABASE="codefresh" +export POSTGRES_HOST="my-postgres.ccjog7pqzunf.us-west-2.rds.amazonaws.com" +export POSTGRES_PORT="5432" +``` +The variables have the same meaning as the configuration values described in the previous section about Postgres. + +However you **still need to specify a set of values** in the Codefresh config file as described in the section above, but with the whole **`postgresSeedJob` section omitted**, like this: + +```yaml +global: + postgresUser: cf_user + postgresPassword: fJTFJMGV7sg5E4Bj + postgresDatabase: codefresh + postgresHostname: my-postgresql.prod.svc.cluster.local + postgresPort: 5432 + +postgresql: + enabled: false #disable default postgresql subchart installation +``` + +### Configuring an external MongoDB + +Codefresh recommends to use the Bitnami MongoDB [chart](https://github.com/bitnami/charts/tree/master/bitnami/mongodb) as a Mongo database. The supported version of Mongo is 4.2.x + +To configure Codefresh on-premises to use an external Mongo service one needs to provide the following values in `config.yaml`: + +- **mongo connection string** - `mongoURI`. This string will be used by all of the services to communicate with mongo. Codefresh will automatically create and add a user with "ReadWrite" permissions to all of the created databases with the username and password from the URI. Optionally, automatic user addition can be disabled - `mongoSkipUserCreation`, in order to use already existing user. In such a case the existing user must have **ReadWrite** permissions to all of newly created databases +Codefresh does not support [DNS Seedlist Connection Format](https://docs.mongodb.com/manual/reference/connection-string/#connections-dns-seedlist) at the moment, use the [Standard Connection Format](https://docs.mongodb.com/manual/reference/connection-string/#connections-standard-connection-string-format) instead. +- mongo **root user** name and **password** - `mongodbRootUser`, `mongodbRootPassword`. The privileged user will be used by Codefresh only during installation for seed jobs and for automatic user addition. After installation, credentials from the provided mongo URI will be used. Mongo root user must have permissions to create users. + +See the [Mongo required Access](https://docs.mongodb.com/manual/reference/method/db.createUser/#required-access) for more details. 
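For reference, a `mongoURI` in the Standard Connection Format looks like the following (hosts, credentials, and options are placeholders):

```
mongodb://cf_user:examplePassword@my-mongodb-0.prod.svc.cluster.local:27017,my-mongodb-1.prod.svc.cluster.local:27017/?replicaSet=rs0
```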
+ +Here is an example of all the related values: + +```yaml +global: + mongodbRootUser: + mongodbRootPassword: + mongoURI: + mongoSkipUserCreation: true + mongoDeploy: false # disables deployment of internal mongo service + +mongo: + enabled: false + ``` + +#### MongoDB with Mutual TLS + +>The option available in kcfi **v0.5.10** + +Codefresh supports enabling SSL/TLS between cf microservices and MongoDB. To enable this option specify in `config.yaml` the following parameters: + + `global.mongoTLS: true`
        + `global.mongoCaCert` - CA certificate file path (in kcfi init directory)
        + `global.mongoCaKey` - CA certificate private key file path (in kcfi init directory) + +`config.yaml` example: +```yaml +global: + mongodbRootUser: root + mongodbRootPassword: WOIqcSwr0y + mongoURI: mongodb://my-mongodb.prod.svc.cluster.local/?ssl=true&authMechanism=MONGODB-X509&authSource=$external + mongoSkipUserCreation: true + mongoDeploy: false # disables deployment of internal mongo service + + mongoTLS: true #enable MongoDB TLS support + mongoCaCert: mongodb-ca/ca-cert.pem + mongoCaKey: mongodb-ca/ca-key.pem + + ### for OfflineLogging feature + runtimeMongoURI: mongodb://my-mongodb.prod.svc.cluster.local/?ssl=true&authMechanism=MONGODB-X509&authSource=$external + +### for OfflineLogging feature +cfapi: + env: + RUNTIME_MONGO_TLS: "true" + RUNTIME_MONGO_TLS_VALIDATE: "true" # 'false' if self-signed certificate to avoid x509 errors + +## set MONGO_MTLS_VALIDATE to `false` if self-signed certificate to avoid x509 errors +cluster-providers: + env: + MONGO_MTLS_VALIDATE: "false" + +k8s-monitor: + env: + MONGO_MTLS_VALIDATE: "false" + +mongo: + enabled: false #disable default mongodb subchart installation + ``` + + >Perform an upgarde:
        + >`kcfi deploy -c config.yaml --debug` + +### Configure an external Redis service +Codefresh recommends to use the Bitnami Redis [chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) as a Redis store. + +**Limitations** + +Codefresh does not support secure connection to Redis (TLS) and AUTH username extension. + +**Configuration** + +To configure Codefresh to use an external Redis service, add the following parameters to your `config.yaml`: + +`config.yaml` example: +```yaml +global: + redisUrl: my-redis.prod.svc.cluster.local + redisPort: 6379 + redisPassword: 6oOhHI8fI5 + + runtimeRedisHost: my-redis.prod.svc.cluster.local + runtimeRedisPassword: 6oOhHI8fI5 + runtimeRedisPort: 6379 + runtimeRedisDb: 2 + +redis: + enabled: false #disable default redis subchart installation +``` + +Where `redis*` - are for the main Redis storage, and `runtimeRedis*` - for storage is used to store pipeline logs in case of `OfflineLogging` feature is turned on. In most cases the host value is the same for these two values. + + +### Configuring an external RabbitMQ service + +Codefresh recommends to use the Bitnami RabbitMQ [chart](https://github.com/bitnami/charts/tree/master/bitnami/rabbitmq) as a RabbitMQ service. + +To use an external RabbitMQ service instead of the local helm chart, add the following values to the __config.yaml__: + +```yaml +rabbitmq: + enabled: false + +global: + rabbitmqUsername: + rabbitmqPassword: + rabbitmqHostname: +``` + +### Configuring an external Consul service + + +Notice that at the moment Codefresh supports only the deprecated Consul API (image __consul:1.0.0__), and does not support connection via HTTPS and any authentication. +The Consul host must expose port `8500`. + +>In general, we don't recommend to take the Consul service outside the cluster. + + +To configure Codefresh to use your external Consul service, add the following values to the __config.yaml__: + +```yaml +global: + consulHost: + +consul: + enabled: false +``` + +## App Cluster Autoscaling + +Autoscaling in Kubernetes is implemented as an interaction between Cluster Autoscaler and Horizontal Pod Autoscaler + +{: .table .table-bordered .table-hover} +| | Scaling Target| Trigger | Controller | How it Works | +| ----------- | ------------- | ------- | --------- | --------- | +| [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler)| Nodes | **Up:** Pending pod
        **Down:** Node resource allocation is low | On GKE you can turn the autoscaler on/off and configure min/max per node group; it can also be installed separately | Listens on pending pods for scale-up and on node allocations for scale-down. Must have permissions to call the cloud API. Considers pod affinity, PDBs, storage, and special annotations | +| [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | Replicas on Deployments or StatefulSets | Metric value thresholds defined in the HPA object | Part of the Kubernetes controller manager | The controller gets metrics from "metrics.k8s.io/v1beta1", "custom.metrics.k8s.io/v1beta1", and "external.metrics.k8s.io/v1beta1". It requires [metrics-server](https://github.com/kubernetes-sigs/metrics-server) and custom metrics adapters ([prometheus-adapter](https://github.com/kubernetes-sigs/prometheus-adapter), [stackdriver-adapter](https://github.com/GoogleCloudPlatform/k8s-stackdriver/tree/master/custom-metrics-stackdriver-adapter)) to serve these APIs (see note (1) below), and it adjusts Deployment or StatefulSet replicas according to the definitions in the HorizontalPodAutoscaler
        There are v1 and beta API versions of HorizontalPodAutoscaler:
        [v1](https://github.com/kubernetes/api/blob/master/autoscaling/v1/types.go) - supports resource metrics (CPU, memory) - `kubectl get hpa`
        [v2beta2](https://github.com/kubernetes/api/blob/master/autoscaling/v2beta2/types.go) and [v2beta1](https://github.com/kubernetes/api/blob/master/autoscaling/v2beta1/types.go) - support both resource and custom metrics - `kubectl get hpa.v2beta2.autoscaling`. **The metric value should decrease as new pods are added.**
        *Wrong metrics Example:* request rate
        *Right metrics Example:* average request rate per pod | + +Note (1) +``` +kubectl get apiservices | awk 'NR==1 || $1 ~ "metrics"' +NAME SERVICE AVAILABLE AGE +v1beta1.custom.metrics.k8s.io monitoring/prom-adapter-prometheus-adapter True 60d +v1beta1.metrics.k8s.io kube-system/metrics-server True 84d +``` + + +**Implementation in Codefresh** + +* Default “Enable Autoscaling” settings for GKE +* Using [prometheus-adapter](https://github.com/kubernetes-sigs/prometheus-adapter) with custom metrics + +We define HPA for cfapi and pipeline-manager services + +**CFapi HPA object** + +It's based on three metrics (HPA controller scales of only one of the targetValue reached): + +``` +kubectl get hpa.v2beta1.autoscaling cf-cfapi -oyaml +``` + +{% highlight yaml %} +{% raw %} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + annotations: + meta.helm.sh/release-name: cf + meta.helm.sh/release-namespace: default + labels: + app.kubernetes.io/managed-by: Helm + name: cf-cfapi + namespace: default +spec: + maxReplicas: 16 + metrics: + - object: + metricName: requests_per_pod + target: + apiVersion: v1 + kind: Service + name: cf-cfapi + targetValue: "10" + type: Object + - object: + metricName: cpu_usage_avg + target: + apiVersion: apps/v1 + kind: Deployment + name: cf-cfapi-base + targetValue: "1" + type: Object + - object: + metricName: memory_working_set_bytes_avg + target: + apiVersion: apps/v1 + kind: Deployment + name: cf-cfapi-base + targetValue: 3G + type: Object + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: cf-cfapi-base +{% endraw%} +{% endhighlight %} + +* `requests_per_pod` is based on `rate(nginx_ingress_controller_requests)` metric ingested from nginx-ingress-controller +* `cpu_usage_avg` based on cadvisor (from kubelet) rate `(rate(container_cpu_user_seconds_total)` +* `memory_working_set_bytes_avg` based on cadvisor `container_memory_working_set_bytes` + +**pipeline-manager HPA** + +based on `cpu_usage_avg` + +{% highlight yaml %} +{% raw %} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + annotations: + meta.helm.sh/release-name: cf + meta.helm.sh/release-namespace: default + labels: + app.kubernetes.io/managed-by: Helm + name: cf-pipeline-manager +spec: + maxReplicas: 8 + metrics: + - object: + metricName: cpu_usage_avg + target: + apiVersion: apps/v1 + kind: Deployment + name: cf-pipeline-manager-base + targetValue: 400m + type: Object + minReplicas: 2 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: cf-pipeline-manager-base +{% endraw%} +{% endhighlight %} + +**prometheus-adapter configuration** + +Reference: [https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md +) + +{% highlight yaml %} +{% raw %} +Rules: + - metricsQuery: | + kube_service_info{<<.LabelMatchers>>} * on() group_right(service) + (sum(rate(nginx_ingress_controller_requests{<<.LabelMatchers>>}[2m])) + / on() kube_deployment_spec_replicas{deployment='<>-base',namespace='<>'}) + name: + as: requests_per_pod + matches: ^(.*)$ + resources: + overrides: + namespace: + resource: namespace + service: + resource: service + seriesQuery: kube_service_info{service=~".*cfapi.*"} + - metricsQuery: | + kube_deployment_labels{<<.LabelMatchers>>} * on(label_app) group_right(deployment) + (label_replace( + avg by (container) 
(rate(container_cpu_user_seconds_total{container=~"cf-(tasker-kubernetes|cfapi.*|pipeline-manager.*)", job="kubelet", namespace='<>'}[15m])) + , "label_app", "$1", "container", "(.*)")) + name: + as: cpu_usage_avg + matches: ^(.*)$ + resources: + overrides: + deployment: + group: apps + resource: deployment + namespace: + resource: namespace + seriesQuery: kube_deployment_labels{label_app=~"cf-(tasker-kubernetes|cfapi.*|pipeline-manager.*)"} + - metricsQuery: "kube_deployment_labels{<<.LabelMatchers>>} * on(label_app) group_right(deployment)\n + \ (label_replace(\n avg by (container) (avg_over_time (container_memory_working_set_bytes{container=~\"cf-.*\", + job=\"kubelet\", namespace='<>'}[15m]))\n + \ , \"label_app\", \"$1\", \"container\", \"(.*)\"))\n \n" + name: + as: memory_working_set_bytes_avg + matches: ^(.*)$ + resources: + overrides: + deployment: + group: apps + resource: deployment + namespace: + resource: namespace + seriesQuery: kube_deployment_labels{label_app=~"cf-.*"} + - metricsQuery: | + kube_deployment_labels{<<.LabelMatchers>>} * on(label_app) group_right(deployment) + label_replace(label_replace(avg_over_time(newrelic_apdex_score[15m]), "label_app", "cf-$1", "exported_app", '(cf-api.*|pipeline-manager|tasker-kuberentes)\\[kubernetes\\]'), "label_app", "$1cfapi$3", "label_app", '(cf-)(cf-api)(.*)') + name: + as: newrelic_apdex + matches: ^(.*)$ + resources: + overrides: + deployment: + group: apps + resource: deployment + namespace: + resource: namespace + seriesQuery: kube_deployment_labels{label_app=~"cf-(tasker-kubernetes|cfapi.*|pipeline-manager)"} +{% endraw%} +{% endhighlight %} + +**How to define HPA in Codefresh installer (kcfi) config** + +Most of Codefresh's Microservices subcharts contain `templates/hpa.yaml`: + +{% highlight yaml %} +{% raw %} +{{- if .Values.HorizontalPodAutoscaler }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "cfapi.fullname" . }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "cfapi.fullname" . 
}}-{{ .version | default "base" }} + minReplicas: {{ coalesce .Values.HorizontalPodAutoscaler.minReplicas .Values.replicaCount 1 }} + maxReplicas: {{ coalesce .Values.HorizontalPodAutoscaler.maxReplicas .Values.replicaCount 2 }} + metrics: +{{- if .Values.HorizontalPodAutoscaler.metrics }} +{{ toYaml .Values.HorizontalPodAutoscaler.metrics | indent 4 }} +{{- else }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 +{{- end }} +{{- end }} +{% endraw%} +{% endhighlight %} + +To configure HPA for CFapi add `HorizontalPodAutoscaler` values to config.yaml, for example: + +(assuming that we already have prometheus adapter configured for metrics `requests_per_pod`, `cpu_usage_avg`, `memory_working_set_bytes_avg`) + +{% highlight yaml %} +{% raw %} +cfapi: + replicaCount: 4 + resources: + requests: + memory: "4096Mi" + cpu: "1100m" + limits: + memory: "4096Mi" + cpu: "2200m" + HorizontalPodAutoscaler: + minReplicas: 2 + maxReplicas: 16 + metrics: + - type: Object + object: + metricName: requests_per_pod + target: + apiVersion: "v1" + kind: Service + name: cf-cfapi + targetValue: 10 + - type: Object + object: + metricName: cpu_usage_avg + target: + apiVersion: "apps/v1" + kind: Deployment + name: cf-cfapi-base + targetValue: 1 + - type: Object + object: + metricName: memory_working_set_bytes_avg + target: + apiVersion: "apps/v1" + kind: Deployment + name: cf-cfapi-base + targetValue: 3G +{% endraw%} +{% endhighlight %} + +**Querying metrics (for debugging)** + +CPU Metric API Call + +``` +kubectl get --raw /apis/metrics.k8s.io/v1beta1/namespaces/codefresh/pods/cf-cfapi-base-****-/ | jq +``` + +Custom Metrics Call + +``` +kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1/namespaces/codefresh/services/cf-cfapi/requests_per_pod | jq +``` + + +## Common Problems, Solutions, and Dependencies + +### Dependencies + +#### Mongo + +All services using the MongoDB are dependent on the `mongo` pod being up and running. If the `mongo` pod is down, the following dependencies will not work: + +- `runtime-environment-manager` +- `pipeline-manager` +- `cf-api` +- `cf-broadcaster` +- `context-manager` +- `nomios` +- `cronius` +- `cluster-promoters` +- `k8s-monitor` +- `charts-manager` +- `tasker-kubernetes` + +#### Logs + +There is a dependency between the `cf-broadcaster` pod and the `cf-api` pod. If your pipeline runs, but does not show any logs, try restarting the broadcaster pod. + +### Problems and Solutions + +**Problem:** installer fails because `codefresh` database does not exist. + +**Solution:** If you are using an external PostgresSQL database (instead of the internal one that the installer provides), you will first need to manually create a new database named `codefresh` inside your PostgresSQL database before running the installer. + + diff --git a/_docs/installation/codefresh-runner.md b/_docs/installation/codefresh-runner.md new file mode 100644 index 00000000..a5fde797 --- /dev/null +++ b/_docs/installation/codefresh-runner.md @@ -0,0 +1,2072 @@ +--- +title: "Codefresh Runner installation" +description: "Run Codefresh pipelines on your private Kubernetes cluster" +group: installation +redirect_from: + - /docs/enterprise/codefresh-runner/ +toc: true +--- + +Install the Codefresh Runner on your Kubernetes cluster to run pipelines and access secure internal services without compromising on-premises security requirements. These pipelines run on your infrastructure, even behind the firewall, and keep code on your Kubernetes cluster secure. 
+ +[Skip to quick installation →](#installation-with-the-quick-start-wizard) + +>Important: + You must install the Codefresh Runner on _each cluster running Codefresh pipelines_. + The Runner is **not** needed in clusters used for _deployment_. You can deploy applications on clusters other than the ones the runner is deployed on. + +The installation process takes care of all Runner components and other required resources (config-maps, secrets, volumes). + +## Prerequisites + +To use the Codefresh runner the following is required: + +1. A Kubernetes cluster with outgoing internet access (versions 1.10 to 1.23). Each node should have 50GB disk size. +2. A container runtime, such as [docker](https://kubernetes.io/blog/2020/12/02/dockershim-faq/), [containerd](https://containerd.io/) or [cri-o](https://cri-o.io/). Note that the runner is **not** dependent on any special dockershim features, so any compliant container runtime is acceptable. The docker socket/daemon used by Codefresh pipelines is **NOT** the one on the host node (as it might not exist at all in the case of containerd or cri-o), but instead an internal docker daemon created/managed by the pipeline itself. +3. A [Codefresh account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) with the Hybrid feature enabled. +4. A [Codefresh CLI token]({{site.baseurl}}/docs/integrations/codefresh-api/#authentication-instructions) that will be used to authenticate your Codefresh account. + +The runner can be installed from any workstation or laptop with access (i.e. via `kubectl`) to the Kubernetes cluster running Codefresh builds. The Codefresh runner will authenticate to your Codefresh account by using the Codefresh CLI token. + +## System Requirements + +Once installed the runner uses the following pods: + +* `runner` - responsible for picking tasks (builds) from the Codefresh API +* `engine` - responsible for running pipelines +* `dind` - responsible for building and using Docker images +* `dind-volume-provisioner` - responsible for provisioning volumes (PV) for dind +* `dind-lv-monitor` - responsible for cleaning **local** volumes + +**CPU/Memory** + +The following table shows **MINIMUM** resources for each component: + +{: .table .table-bordered .table-hover} +| Component | CPU requests| RAM requests | Storage | Type | Always on | +| -------------- | --------------|------------- |-------------------------|-------|-------| +| `runner` | 100m | 100Mi | Doesn't need PV | Deployment | Yes | +| `engine` | 100m | 500Mi | Doesn't need PV | Pod | No | +| `dind` | 400m | 800Mi | 16GB PV | Pod | No | +| `dind-volume-provisioner` | 300m | 400Mi | Doesn't need PV | Deployment | Yes | +| `dind-lv-monitor` | 300m | 400Mi | Doesn't need PV | DaemonSet | Yes | + +Components that are always on consume resources all the time. Components that are not always on only consume resources when pipelines are running (they are created and destroyed automatically for each pipeline). + +Node size and count will depend entirely on how many pipelines you want to be “ready” for and how many will use “burst” capacity. + +* Ready (nodes): Lower initialization time and faster build times. +* Burst (nodes): High initialization time and slower build times. (Not recommended) + +The size of your nodes directly relates to the size required for your pipelines and thus it is dynamic. If you find that only a few larger pipelines require larger nodes you may want to have two Codefresh Runners associated to different node pools. 
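+
+Before installing, it can help to confirm that the target nodes have enough allocatable capacity for the always-on components. A quick sanity check (the exact capacity you need depends on your pipelines):
+
+```shell
+# Show allocatable CPU and memory per node for the always-on components
+# (runner, dind-volume-provisioner, dind-lv-monitor)
+kubectl get nodes -o custom-columns=NAME:.metadata.name,CPU:.status.allocatable.cpu,MEMORY:.status.allocatable.memory
+
+# If the metrics-server is installed, also check current usage
+kubectl top nodes
+```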
+ + +**Storage** + +For the storage options needed by the `dind` pod we suggest: + +* [Local Volumes](https://kubernetes.io/docs/concepts/storage/volumes/#local) `/var/lib/codefresh/dind-volumes` on the K8S nodes filesystem (**default**) +* [EBS](https://aws.amazon.com/ebs/) in the case of AWS. See also the [notes](#installing-on-aws) about getting caching working. +* [Local SSD](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/local-ssd) or [GCE Disks](https://cloud.google.com/compute/docs/disks#pdspecs) in the case of GCP. See [notes](#installing-on-google-kubernetes-engine) about configuration. + + +**Networking Requirements** + +* `dind` - this pod will create an internal network in the cluster to run all the pipeline steps; needs outgoing/egress access to Dockerhub and `quay.io` +* `runner` - this pod needs outgoing/egress access to `g.codefresh.io`; needs network access to [app-proxy]({{site.baseurl}}/docs/administration/codefresh-runner/#optional-installation-of-the-app-proxy) (if app-proxy is used) +* `engine` - this pod needs outgoing/egress access to `g.codefresh.io`, `*.firebaseio.com` and `quay.io`; needs network access to `dind` pod + +All CNI providers/plugins are compatible with the runner components. + +## Installation with the Quick-start Wizard + +Install the Codefresh CLI + +```shell +npm install -g codefresh +``` + +[Alternative install methods](https://codefresh-io.github.io/cli/installation/) + +Authenticate the CLI + +```shell +codefresh auth create-context --api-key {API_KEY} +``` + +You can obtain an API Key from your [user settings page](https://g.codefresh.io/user/settings). +>**Note:** Make sure when you generate the token used to authenticate with the CLI, you generate it with *all scopes*. + +>**Note:** access to the Codefresh CLI is only needed once during the Runner installation. After that, the Runner will authenticate on it own using the details provided. You do NOT need to install the Codefresh CLI on the cluster that is running Codefresh pipelines. + +Then run the wizard with the following command: + +```shell +codefresh runner init +``` + +or + +```shell +codefresh runner init --token +``` + +Brefore proceeding with installation, the wizard asks you some basic questions. + +{% include image.html + lightbox="true" + file="/images/administration/runner/installation-wizard.png" + url="/images/administration/runner/installation-wizard.png" + alt="Codefresh Runner wizard" + caption="Codefresh Runner wizard" + max-width="100%" + %} + +The wizard also creates and runs a sample pipeline that you can see in your Codefresh UI. + +{% include image.html + lightbox="true" + file="/images/administration/runner/sample-pipeline.png" + url="/images/administration/runner/sample-pipeline.png" + alt="Codefresh Runner example pipeline" + caption="Codefresh Runner example pipeline" + max-width="90%" + %} + +That's it! You can now start using the Runner. + +You can also verify your installation with: + +```shell +codefresh runner info +``` + +During installation you can see which API token will be used by the runner (if you don't provide one). The printed token is used by the runner to talk to the Codefresh platform carrying permissions that allow the runner to run pipelines. If you save the token, it can later be used to restore the runner's permissions without creating a new runner installation, if the deployment is deleted. 
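+
+If you saved that token, a later re-installation can reuse it instead of generating a new one. A minimal sketch (replace the placeholder with the token you stored):
+
+```shell
+# Reuse a token saved from a previous installation instead of generating a new one
+export RUNNER_TOKEN=<token saved from the original installation>
+codefresh runner init --token ${RUNNER_TOKEN}
+```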
+ +**Customizing the Wizard Installation** + +You can customize the wizard installation by passing your own values in the `init` command. +To inspect all available options run `init` with the `--help` flag: + +```shell +codefresh runner init --help +``` + +**Inspecting the Manifests Before they are Installed** + +If you want to see what manifests are used by the installation wizard you can supply the `--dry-run` parameter in the installation process. + +```shell +codefresh runner init --dry-run +``` + +This will execute the wizard in a special mode that will not actually install anything in your cluster. After all configuration questions are asked, all Kubernetes manifests used by the installer will be instead saved locally in a folder `./codefresh_manifests`. + +## Install Codefresh Runner with values file + +To install the Codefresh Runner with pre-defined values file use `--values` flag: + +```shell +codefresh runner init --values values.yaml +``` + +Use [this example](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml) as a starting point for your values file. + +## Install Codefresh Runner with Helm + +To install the Codefresh Runner using Helm, follow these steps: + +1. Download the Codefresh CLI and authenticate it with your Codefresh account. Click [here](https://codefresh-io.github.io/cli/getting-started/) for more detailed instructions. +2. Run the following command to create all of the necessary entities in Codefresh: + + ```shell + codefresh runner init --generate-helm-values-file + ``` + + * This will not install anything on your cluster, except for running cluster acceptance tests, (which may be skipped using the `--skip-cluster-test` option). Please note, that the Runner Agent and the Runtime Environment are still created in your Codefresh account. + * This command will also generate a `generated_values.yaml` file in your current directory, which you will need to provide to the `helm install` command later. If you want to install several Codefresh Runners, you will need a separate `generated_values.yaml` file for each Runner. + +3. Now run the following to complete the installation: + + ```shell + helm repo add cf-runtime https://chartmuseum.codefresh.io/cf-runtime + + helm install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml --create-namespace --namespace codefresh + ``` + * Here is the link to a repository with the chart for reference: [https://github.com/codefresh-io/venona/tree/release-1.0/.deploy/cf-runtime](https://github.com/codefresh-io/venona/tree/release-1.0/.deploy/cf-runtime) + +4. At this point you should have a working Codefresh Runner. You can verify the installation by running: + + ```shell + codefresh runner execute-test-pipeline --runtime-name + ``` +>**Note!**
        +Runtime components' (engine and dind) configuration is determined by the `runner init` command.
        +The `helm install` command can only control the configuration of `runner`, `dind-volume-provisioner` and `lv-monitor` components. + +## Using the Codefresh Runner + +Once installed, the Runner is fully automated. It polls the Codefresh SAAS (by default every 3 seconds) on its own and automatically creates all resources needed for running pipelines. + +Once installation is complete, you should see the cluster of the runner as a new [Runtime environment](https://g.codefresh.io/account-admin/account-conf/runtime-environments) in Codefresh in your *Account Settings*, in the respective tab. + +{% include image.html + lightbox="true" + file="/images/administration/runner/runtime-environments.png" + url="/images/administration/runner/runtime-environments.png" + alt="Available runtime environments" + caption="Available runtime environments" + max-width="60%" + %} + +If you have multiple environments available, you can change the default (shown with a thin blue border) by clicking on the 3 dot menu on the right of each environment. The Codefresh runner installer comes with a `set-default` option that is automatically set by default in the new runtime environment. + +You can even override the runtime environment for a specific pipeline by specifying in the respective section in the [pipeline settings]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/). + +{% include image.html + lightbox="true" + file="/images/administration/runner/environment-per-pipeline.png" + url="/images/administration/runner/environment-per-pipeline.png" + alt="Running a pipeline on a specific environment" + caption="Running a pipeline on a specific environment" + max-width="60%" + %} + +## Checking the Runner + +Once installed, the runner is a normal Kubernetes application like all other applications. You can use your existing tools to monitor it. + +Only the runner pod is long living inside your cluster. All other components (such as the engine) are short lived and exist only during pipeline builds. +You can always see what the Runner is doing by listing the resources inside the namespace you chose during installation: + +```shell +$ kubectl get pods -n codefresh-runtime +NAME READY STATUS RESTARTS AGE +dind-5ee7577017ef40908b784388 1/1 Running 0 22s +dind-lv-monitor-runner-hn64g 1/1 Running 0 3d +dind-lv-monitor-runner-pj84r 1/1 Running 0 3d +dind-lv-monitor-runner-v2lhc 1/1 Running 0 3d +dind-volume-provisioner-runner-64994bbb84-lgg7v 1/1 Running 0 3d +engine-5ee7577017ef40908b784388 1/1 Running 0 22s +monitor-648b4778bd-tvzcr 1/1 Running 0 3d +runner-5d549f8bc5-7h5rc 1/1 Running 0 3d +``` + +In the same manner you can list secrets, config-maps, logs, volumes etc. for the Codefresh builds. + +## Uninstall the Codefresh Runner + +You can uninstall the Codefresh runner from your cluster by running: + +```shell +codefresh runner delete +``` + +A wizard, similar to the installation wizard, will ask you questions regarding your cluster before finishing with the removal. + +Like the installation wizard, you can pass the additional options in advance as command line parameters (see `--help` output): +```shell +codefresh runner delete --help +``` + + + +## Runner architecture overview + +{% include image.html + lightbox="true" + file="/images/administration/runner/codefresh_runner.png" + url="/images/administration/runner/codefresh_runner.png" + alt="Codefresh Runner architecture overview" + caption="Codefresh Runner architecture overview" + max-width="100%" + %} + + +1. 
[Runtime-Environment specification]({{site.baseurl}}/docs/administration/codefresh-runner/) defines engine and dind pods spec and PVC parameters. +2. Runner pod (Agent) pulls tasks (Builds) from Codefresh API every 3 seconds. +3. Once the agent receives build task (either Manual run build or Webhook triggered build) it calls k8s API to create engine/dind pods and PVC object. +4. Volume Provisioner listens for PVC events (create) and based on StorageClass definition it creates PV object with the corresponding underlying volume backend (ebs/gcedisk/local). +5. During the build, each step (clone/build/push/freestyle/composition) is represented as docker container inside dind (docker-in-docker) pod. Shared Volume (`/codefresh/volume`) is represented as docker volume and mounted to every step (docker containers). PV mount point inside dind pod is `/var/lib/docker`. +6. Engine pod controls dind pod. It deserializes pipeline yaml to docker API calls, terminates dind after build has been finished or per user request (sigterm). +7. `dind-lv-monitor` DaemonSet OR `dind-volume-cleanup` CronJob are part of [Runtime Cleaner]({{site.baseurl}}/docs/administration/codefresh-runner/#runtime-cleaners), `app-proxy` Deployment and Ingress are described in the [next section]({{site.baseurl}}/docs/administration/codefresh-runner/#app-proxy-installation), `monitor` Deployment is for [Kubernetes Dashboard]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/). + +## App Proxy installation + +The App Proxy is an **optional** component of the runner that is mainly used when the git provider server is installed on-premises behind the firewall. The App Proxy provides the following features once installed: + +* Enables you to automatically create webhooks for Git in the Codefresh UI (same as the SAAS experience) +* Sends commit status information back to your Git provider (same as the SAAS experience) +* Makes all Git Operations in the GUI work exactly like the SAAS installation of Codefresh + +The requirements for the App proxy is a Kubernetes cluster that: + +1. has already the Codefresh runner installed +1. has an active [ingress controller](https://kubernetes.io/docs/concepts/services-networking/ingress/) +1. allows incoming connections from the VPC/VPN where users are browsing the Codefresh UI. The ingress connection **must** have a hostname assigned for this route and **must** be configured to perform SSL termination + +>Currently the App-proxy works only for Github (SAAS and on-prem versions), Gitlab (SAAS and on-prem versions) and Bitbucket server. + +Here is the architecture of the app-proxy: + +{% include image.html + lightbox="true" + file="/images/administration/runner/app-proxy-architecture.png" + url="/images/administration/runner/app-proxy-architecture.png" + alt="How App Proxy and the Codefresh runner work together" + caption="How App Proxy and the Codefresh runner work together" + max-width="80%" + %} + +Basically when a Git GET operation takes place, the Codefresh UI will contact the app-proxy (if it is present) and it will route the request to the backing Git provider. The confidential Git information never leaves the firewall premises and the connection between the browser and the ingress is SSL/HTTPS. + +The app-proxy has to work over HTTPS and by default it will use the ingress controller to do its SSL termination. Therefore, the ingress controller will need to be configured to perform SSL termination. 
Check the documentation of your ingress controller (for example [nginx ingress](https://kubernetes.github.io/ingress-nginx/examples/tls-termination/)). This means that the app-proxy does not compromise security in any way. + +To install the app-proxy on a Kubernetes cluster that already has a Codefresh runner use the following command: + +```shell +codefresh install app-proxy --host= +``` + +If you want to install the Codefresh runner and app-proxy in a single command use the following: + +```shell +codefresh runner init --app-proxy --app-proxy-host= +``` + +If you have multiple ingress controllers in the Kubernetes cluster you can use the `--app-proxy-ingress-class` parameter to define which ingress will be used. For additional security you can also define an allowlist for IPs/ranges that are allowed to use the ingress (to further limit the web browsers that can access the Ingress). Check the documentation of your ingress controller for the exact details. + +By default the app-proxy ingress will use the path `hostname/app-proxy`. You can change that default by using the values file in the installation with the flag `--values values.yaml`. + +See the `AppProxy` section in the example [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml#L231-L253). + +```shell +codefresh install app-proxy --values values.yaml +``` + +## Manual Installation of Runner Components + +If you don't want to use the wizard, you can also install the components of the runner yourself. + +The Codefresh runner consists of the following: + +* Runner - responsible for getting tasks from the platform and executing them. One per account. Can handle multiple runtimes +* Runtime - the components that are responsible on runtime for the workflow execution : + * Volume provisioner - (pod’s name prefix dind-volume-provisioner-runner) - responsible for volume provisioning for dind pod + * lv-monitor - (pod’s name prefix dind-lv-monitor-runner) - daemonset - responsible for cleaning volumes + +To install the runner on a single cluster with both the runtime and the agent, execute the following: + +```shell +kubectl create namespace codefresh +codefresh install agent --agent-kube-namespace codefresh --install-runtime +``` + +You can then follow the instructions for [using the runner](#using-the-codefresh-runner). + +### Installing Multiple runtimes with a Single Agent + +It is also possible, for advanced users to install a single agent that can manage multiple runtime environments. + +>NOTE: Please make sure that the cluster where the agent is installed has network access to the other clusters of the runtimes + +```shell +# 1. Create namespace for the agent: +kubectl create namespace codefresh-agent + +# 2. Install the agent on the namespace ( give your agent a unique name as $NAME): +# Note down the token and use it in the second command. +codefresh create agent $NAME +codefresh install agent --token $TOKEN --kube-namespace codefresh-agent +codefresh get agents + +# 3. Create namespace for the first runtime: +kubectl create namespace codefresh-runtime-1 + +# 4. Install the first runtime on the namespace +# 5. the runtime name is printed +codefresh install runtime --runtime-kube-namespace codefresh-runtime-1 + +# 6. Attach the first runtime to agent: +codefresh attach runtime --agent-name $AGENT_NAME --agent-kube-namespace codefresh-agent --runtime-name $RUNTIME_NAME --runtime-kube-namespace codefresh-runtime-1 + +# 7. 
Restart the runner pod in namespace `codefresh-agent` +kubectl delete pods $RUNNER_POD + +# 8. Create namespace for the second runtime +kubectl create namespace codefresh-runtime-2 + +# 9. Install the second runtime on the namespace +codefresh install runtime --runtime-kube-namespace codefresh-runtime-2 + +# 10. Attach the second runtime to agent and restart the Venona pod automatically +codefresh attach runtime --agent-name $AGENT_NAME --agent-kube-namespace codefresh-agent --runtime-name $RUNTIME_NAME --runtime-kube-namespace codefresh-runtime-2 --restart-agent +``` + +## Configuration Options + +You can fine tune the installation of the runner to better match your environment and cloud provider. + +### Installing on AWS + +If you've installed the Codefresh runner on [EKS](https://aws.amazon.com/eks/) or any other custom cluster (e.g. with kops) in Amazon you need to configure it properly to work with EBS volumes in order to gain [caching]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipeline-caching/). + +> This section assumes you already installed the Runner with default options: `codefresh runner init` + +**Prerequisites** + +`dind-volume-provisioner` deployment should have permissions to create/attach/detach/delete/get ebs volumes. + +There are 3 options: +* running `dind-volume-provisioner` pod on the node (node-group) with iam role +* k8s secret with [aws credentials format](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) mounted to ~/.aws/credentials (or `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` env vars passed) to the `dind-volume-provisioner` pod +* using [Aws Identity for Service Account](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) iam role assigned to `volume-provisioner-runner` service account + +Minimal policy for `dind-volume-provisioner`: +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume" + ], + "Resource": "*" + } + ] +} +``` + +Create Storage Class for EBS volumes: +>Choose **one** of the Availability Zones you want to be used for your pipeline builds. Multi AZ configuration is not supported. 
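+
+To decide which zone to use, you can list the zone label of each node (informational only; pick whichever single zone suits your builds):
+
+```shell
+# List the availability zone of each node to help choose a single AZ for builds
+kubectl get nodes -L topology.kubernetes.io/zone
+```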
+ +**Storage Class (gp2)** + +```yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: dind-ebs +### Specify name of provisioner +provisioner: codefresh.io/dind-volume-provisioner-runner-<-NAMESPACE-> # <---- rename <-NAMESPACE-> with the runner namespace +volumeBindingMode: Immediate +parameters: + # ebs or ebs-csi + volumeBackend: ebs + # Valid zone + AvailabilityZone: us-central1-a # <---- change it to your AZ + # gp2, gp3 or io1 + VolumeType: gp2 + # in case of io1 you can set iops + # iops: 1000 + # ext4 or xfs (default to xfs, ensure that there is xfstools ) + fsType: xfs +``` +**Storage Class (gp3)** + +```yaml +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: dind-ebs +### Specify name of provisioner +provisioner: codefresh.io/dind-volume-provisioner-runner-<-NAMESPACE-> # <---- rename <-NAMESPACE-> with the runner namespace +volumeBindingMode: Immediate +parameters: + # ebs or ebs-csi + volumeBackend: ebs + # Valid zone + AvailabilityZone: us-central1-a # <---- change it to your AZ + # gp2, gp3 or io1 + VolumeType: gp3 + # ext4 or xfs (default to xfs, ensure that there is xfstools ) + fsType: xfs + # I/O operations per second. Only effetive when gp3 volume type is specified. + # Default value - 3000. + # Max - 16,000 + iops: "5000" + # Throughput in MiB/s. Only effective when gp3 volume type is specified. + # Default value - 125. + # Max - 1000. + throughput: "500" +``` + +Apply storage class manifest: +```shell +kubectl apply -f dind-ebs.yaml +``` + +Change your [runtime environment]({{site.baseurl}}/docs/administration/codefresh-runner/#full-runtime-environment-specification) configuration: + +The same AZ you selected before should be used in nodeSelector inside Runtime Configuration: + +To get a list of all available runtimes execute: + +```shell +codefresh get runtime-environments +``` + +Choose the runtime you have just added and get its yaml representation: + +```shell +codefresh get runtime-environments my-eks-cluster/codefresh -o yaml > runtime.yaml +``` + + Under `dockerDaemonScheduler.cluster` block add the nodeSelector `topology.kubernetes.io/zone: `. It should be at the same level as `clusterProvider` and `namespace`. Also, the `pvcs.dind` block should be modified to use the Storage Class you created above (`dind-ebs`). + +`runtime.yaml` example: + +```yaml +version: 1 +metadata: + ... +runtimeScheduler: + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + namespace: codefresh + serviceAccount: codefresh-engine + annotations: {} +dockerDaemonScheduler: + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + namespace: codefresh + nodeSelector: + topology.kubernetes.io/zone: us-central1-a + serviceAccount: codefresh-engine + annotations: {} + userAccess: true + defaultDindResources: + requests: '' + pvcs: + dind: + volumeSize: 30Gi + storageClassName: dind-ebs + reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName' +extends: + - system/default/hybrid/k8s_low_limits +description: '...' 
+accountId: 5f048d85eb107d52b16c53ea +``` + +Update your runtime environment with the [patch command](https://codefresh-io.github.io/cli/operate-on-resources/patch/): + +```shell +codefresh patch runtime-environment my-eks-cluster/codefresh -f runtime.yaml +``` + +If necessary, delete all existing PV and PVC objects left from default local provisioner: +``` +kubectl delete pvc -l codefresh-app=dind -n +kubectl delete pv -l codefresh-app=dind -n +``` + +>You can define all these options above for clean Runner installation with [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml) file: + +`values-ebs.yaml` example: + +```yaml +### Storage parameter example for aws ebs disks +Storage: + Backend: ebs + AvailabilityZone: us-east-1d + VolumeType: gp3 + #AwsAccessKeyId: ABCDF + #AwsSecretAccessKey: ZYXWV + Encrypted: # encrypt volume, default is false + VolumeProvisioner: + ServiceAccount: + Annotations: + eks.amazonaws.com/role-arn: arn:aws:iam:::role/ +NodeSelector: topology.kubernetes.io/zone=us-east-1d +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + topology.kubernetes.io/zone: us-east-1d +``` + +```shell +codefresh runner init --values values-ebs.yaml --exec-demo-pipeline false --skip-cluster-integration true +``` + +### Installing to EKS with Autoscaling + +#### Step 1- EKS Cluster Creation + +See below is a content of cluster.yaml file. We define separate node pools for dind, engine and other services(like runner, cluster-autoscaler etc). + +Before creating the cluster we have created two separate IAM policies: + +* one for our volume-provisioner controller(policy/runner-ebs) that should create and delete volumes +* one for dind pods(policy/dind-ebs) that should be able to attach/detach those volumes to the appropriate nodes using [iam attachPolicyARNs options](https://eksctl.io/usage/iam-policies/). 
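+
+One way to create these policies is with the AWS CLI. A sketch, assuming the two JSON documents shown below are saved locally as `policy/dind-ebs.json` and `policy/runner-ebs.json`:
+
+```shell
+# Create the two IAM policies referenced by the node groups in my-eks-cluster.yaml
+aws iam create-policy --policy-name dind-ebs --policy-document file://policy/dind-ebs.json
+aws iam create-policy --policy-name runner-ebs --policy-document file://policy/runner-ebs.json
+```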
+ +`policy/dind-ebs:` + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeVolumes" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "ec2:DetachVolume", + "ec2:AttachVolume" + ], + "Resource": [ + "*" + ] + } + ] +} +``` + +`policy/runner-ebs:` + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume" + ], + "Resource": "*" + } + ] +} +``` + +`my-eks-cluster.yaml` + +```yaml +apiVersion: eksctl.io/v1alpha5 +kind: ClusterConfig +metadata: + name: my-eks + region: us-west-2 + version: "1.15" + +nodeGroups: + - name: dind + instanceType: m5.2xlarge + desiredCapacity: 1 + iam: + attachPolicyARNs: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess + - arn:aws:iam::XXXXXXXXXXXX:policy/dind-ebs + withAddonPolicies: + autoScaler: true + ssh: # import public key from file + publicKeyPath: ~/.ssh/id_rsa.pub + minSize: 1 + maxSize: 50 + volumeSize: 50 + volumeType: gp2 + ebsOptimized: true + availabilityZones: ["us-west-2a"] + kubeletExtraConfig: + enableControllerAttachDetach: false + labels: + node-type: dind + taints: + codefresh.io: "dinds:NoSchedule" + + - name: engine + instanceType: m5.large + desiredCapacity: 1 + iam: + withAddonPolicies: + autoScaler: true + minSize: 1 + maxSize: 10 + volumeSize: 50 + volumeType: gp2 + availabilityZones: ["us-west-2a"] + labels: + node-type: engine + taints: + codefresh.io: "engine:NoSchedule" + + - name: addons + instanceType: m5.2xlarge + desiredCapacity: 1 + ssh: # import public key from file + publicKeyPath: ~/.ssh/id_rsa.pub + minSize: 1 + maxSize: 10 + volumeSize: 50 + volumeType: gp2 + ebsOptimized: true + availabilityZones: ["us-west-2a"] + labels: + node-type: addons + iam: + attachPolicyARNs: + - arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy + - arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy + - arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess + - arn:aws:iam::XXXXXXXXXXXX:policy/runner-ebs + withAddonPolicies: + autoScaler: true +availabilityZones: ["us-west-2a", "us-west-2b", "us-west-2c"] +``` + +Execute: + +```shell +eksctl create cluster -f my-eks-cluster.yaml +``` + +The config above will leverage [Amazon Linux 2](https://aws.amazon.com/amazon-linux-2/) as the default operating system for the nodes in the nodegroup. To leverage [Bottlerocket-based nodes](https://aws.amazon.com/bottlerocket/), specify the AMI Family using `amiFamily: Bottlerocket` and add the following additional IAM Policies: `arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly` and `arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore`. + +>Bottlerocket is an open source Linux based Operating System specifically built to run containers. It focuses on security, simplicity and easy updates via transactions. Find more information in the [official repository](https://github.com/bottlerocket-os/bottlerocket). 
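+
+Once the cluster is created, you can quickly confirm that the node groups carry the expected labels and taints (based on the `my-eks-cluster.yaml` above):
+
+```shell
+# Check the node-type label assigned to each node
+kubectl get nodes -L node-type
+
+# Confirm the taint on the dind node group
+kubectl describe nodes -l node-type=dind | grep -A1 Taints
+```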
+ +#### Step 2 - Autoscaler + +Once the cluster is up and running we need to install the [cluster autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html): + +We used iam AddonPolicies `"autoScaler: true"` in the cluster.yaml file so there is no need to create a separate IAM policy or add Auto Scaling group tags, everything is done automatically. + +Deploy the Cluster Autoscaler: + +```shell +kubectl apply -f https://raw.githubusercontent.com/kubernetes/autoscaler/master/cluster-autoscaler/cloudprovider/aws/examples/cluster-autoscaler-autodiscover.yaml +``` + +Add the `cluster-autoscaler.kubernetes.io/safe-to-evict` annotation + +```shell +kubectl -n kube-system annotate deployment.apps/cluster-autoscaler cluster-autoscaler.kubernetes.io/safe-to-evict="false" +``` + +Edit the cluster-autoscaler container command to replace `` with *my-eks*(name of the cluster from cluster.yaml file), and add the following options: + `--balance-similar-node-groups` and `--skip-nodes-with-system-pods=false` + +```shell +kubectl -n kube-system edit deployment.apps/cluster-autoscaler +``` + +```yaml +spec: + containers: + - command: + - ./cluster-autoscaler + - --v=4 + - --stderrthreshold=info + - --cloud-provider=aws + - --skip-nodes-with-local-storage=false + - --expander=least-waste + - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/my-eks + - --balance-similar-node-groups + - --skip-nodes-with-system-pods=false +``` + +We created our EKS cluster with 1.15 version so the appropriate cluster autoscaler version from [https://github.com/kubernetes/autoscaler/releases](https://github.com/kubernetes/autoscaler/releases) is 1.15.6 + +```shell +kubectl -n kube-system set image deployment.apps/cluster-autoscaler cluster-autoscaler=us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler:v1.15.6 +``` + +Check your own version to make sure that the autoscaler version is appropriate. + +#### Step 3 - Optional: We also advise to configure overprovisioning with Cluster Autoscaler + +See details at the [FAQ]( +https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler). 
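+
+Before moving on, it is worth confirming that the autoscaler deployment is healthy and logging its scaling decisions:
+
+```shell
+# Verify the cluster autoscaler is running and inspect its recent decisions
+kubectl -n kube-system get deployment cluster-autoscaler
+kubectl -n kube-system logs deployment/cluster-autoscaler --tail=50
+```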
+ +#### Step 4 - Adding an EKS cluster as a runner to the Codefresh platform with EBS support + +Make sure that you are targeting the correct cluster + +```shell +$ kubectl config current-context +my-aws-runner +``` + +Install the runner passing additional options: + +```shell +codefresh runner init \ +--name my-aws-runner \ +--kube-node-selector=topology.kubernetes.io/zone=us-west-2a \ +--build-node-selector=topology.kubernetes.io/zone=us-west-2a \ +--kube-namespace cf --kube-context-name my-aws-runner \ +--set-value Storage.VolumeProvisioner.NodeSelector=node-type=addons \ +--set-value=Storage.Backend=ebs \ +--set-value=Storage.AvailabilityZone=us-west-2a +``` + +* You should specify the zone in which you want your volumes to be created, example: `--set-value=Storage.AvailabilityZone=us-west-2a` +* (Optional) - if you want to assign the volume-provisioner to a specific node, for example a specific node group what has an IAM role which allows to create EBS volumes, example: `--set-value Storage.VolumeProvisioner.NodeSelector=node-type=addons` + +If you want to use [encrypted EBS volumes](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_key_mgmt) (they are unencrypted by default) - add the custom value `--set-value=Storage.Encrypted=true` +If you already have a key - add its ARN via `--set-value=Storage.KmsKeyId= value`, otherwise a key is generated by AWS. Here is the full command: + +```shell +codefresh runner init \ +--name my-aws-runner \ +--kube-node-selector=topology.kubernetes.io/zone=us-west-2a \ +--build-node-selector=topology.kubernetes.io/zone=us-west-2a \ +--kube-namespace cf --kube-context-name my-aws-runner \ +--set-value Storage.VolumeProvisioner.NodeSelector=node-type=addons \ +--set-value=Storage.Backend=ebs \ +--set-value=Storage.AvailabilityZone=us-west-2a\ +--set-value=Storage.Encrypted=[false|true] \ +--set-value=Storage.KmsKeyId= +``` + +For an explanation of all other options run `codefresh runner init --help` ([global parameter table](#customizing-the-wizard-installation)). + +At this point the quick start wizard will start the installation. 
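+
+When the wizard finishes, a quick check that the storage pieces were created as expected (a sketch, assuming the `cf` namespace used above):
+
+```shell
+# List storage classes and confirm the volume provisioner landed on the intended node group
+kubectl get storageclass
+kubectl get pods -n cf -o wide | grep dind-volume-provisioner
+```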
+ +Once that is done we need to modify the runtime environment of `my-aws-runner` to specify the necessary toleration, nodeSelector and disk size: + +```shell +codefresh get re --limit=100 my-aws-runner/cf -o yaml > my-runtime.yml +``` + +Modify the file my-runtime.yml as shown below: + +```yaml +version: null +metadata: + agent: true + trial: + endingAt: 1593596844167 + reason: Codefresh hybrid runtime + started: 1592387244207 + name: my-aws-runner/cf + changedBy: ivan-codefresh + creationTime: '2020/06/17 09:47:24' +runtimeScheduler: + cluster: + clusterProvider: + accountId: 5cb563d0506083262ba1f327 + selector: my-aws-runner + namespace: cf + nodeSelector: + node-type: engine + tolerations: + - effect: NoSchedule + key: codefresh.io + operator: Equal + value: engine + annotations: {} +dockerDaemonScheduler: + cluster: + clusterProvider: + accountId: 5cb563d0506083262ba1f327 + selector: my-aws-runner + namespace: cf + nodeSelector: + node-type: dind + annotations: {} + defaultDindResources: + requests: '' + tolerations: + - effect: NoSchedule + key: codefresh.io + operator: Equal + value: dinds + pvcs: + dind: + volumeSize: 30Gi + reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName' + storageClassName: dind-local-volumes-runner-cf + userAccess: true +extends: + - system/default/hybrid/k8s_low_limits +description: 'Runtime environment configure to cluster: my-aws-runner and namespace: cf' +accountId: 5cb563d0506083262ba1f327 +``` + +Apply changes. + +```shell +codefresh patch re my-aws-runner/cf -f my-runtime.yml +``` + +That's all. Now you can go to UI and try to run a pipeline on RE my-aws-runner/cf + +### Injecting AWS arn roles into the cluster + +**Step 1** - Make sure the OIDC provider is connected to the cluster + +See: + +* [https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) +* [https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/](https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/) + +**Step 2** - Create IAM role and policy as explained in [https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html](https://docs.aws.amazon.com/eks/latest/userguide/create-service-account-iam-policy-and-role.html) + +Here, in addition to the policy explained, you need a Trust Relationship established between this role and the OIDC entity. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::${ACCOUNT_ID}:oidc-provider/${OIDC_PROVIDER}" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringEquals": { + "${OIDC_PROVIDER}:sub": "system:serviceaccount:${CODEFRESH_NAMESPACE}:codefresh-engine" + } + } + } + ] +} +``` + +**Step 3** - Annotate the `codefresh-engine` Kubernetes Service Account in the namespace where the Codefresh Runner is installed with the proper IAM role. + +```shell +kubectl annotate -n ${CODEFRESH_NAMESPACE} sa codefresh-engine eks.amazonaws.com/role-arn=${ROLE_ARN} +``` + +Once the annotation is added, you should see it when you describe the Service Account. 
+ +```shell +kubectl describe -n ${CODEFRESH_NAMESPACE} sa codefresh-engine + +Name: codefresh-engine +Namespace: codefresh +Labels: app=app-proxy + version=1.6.8 +Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/Codefresh +Image pull secrets: +Mountable secrets: codefresh-engine-token-msj8d +Tokens: codefresh-engine-token-msj8d +Events: +``` + +**Step 4** - Using the AWS assumed role identity + +After annotating the Service Account, run a pipeline to test the AWS resource access: + +```yaml +RunAwsCli: + title : Communication with AWS + image : mesosphere/aws-cli + stage: "build" + commands : + - apk update + - apk add jq + - env + - cat /codefresh/volume/sensitive/.kube/web_id_token + - aws sts assume-role-with-web-identity --role-arn $AWS_ROLE_ARN --role-session-name mh9test --web-identity-token file://$AWS_WEB_IDENTITY_TOKEN_FILE --duration-seconds 1000 > /tmp/irp-cred.txt + - export AWS_ACCESS_KEY_ID="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.AccessKeyId")" + - export AWS_SECRET_ACCESS_KEY="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.SecretAccessKey")" + - export AWS_SESSION_TOKEN="$(cat /tmp/irp-cred.txt | jq -r ".Credentials.SessionToken")" + - rm /tmp/irp-cred.txt + - aws s3api get-object --bucket jags-cf-eks-pod-secrets-bucket --key eks-pod2019-12-10-21-18-32-560931EEF8561BC4 getObjectNotWorks.txt +``` + +### Installing behind a proxy + +If you want to deploy the Codefresh runner on a Kubernetes cluster that doesn’t have direct access to `g.codefresh.io`, and has to go trough a proxy server to access `g.codefresh.io`, you will need to follow these additional steps: + +**Step 1** - Follow the installation instructions of the previous section + +**Step 2** - Run `kubectl edit deployment runner -n codefresh-runtime` and add the proxy variables like this + +```yaml +spec: + containers: + - env: + - name: HTTP_PROXY + value: http://:port + - name: HTTPS_PROXY + value: http://:port + - name: http_proxy + value: http://:port + - name: https_proxy + value: http://:port + - name: no_proxy + value: localhost,127.0.0.1, + - name: NO_PROXY + value: localhost,127.0.0.1, +``` + +**Step 3** - Add the following variables to your runtime.yaml, both under the `runtimeScheduler:` and under `dockerDaemonScheduler:` blocks inside the `envVars:` section + +```yaml +HTTP_PROXY: http://:port +http_proxy: http://:port +HTTPS_PROXY: http://:port +https_proxy: http://:port +No_proxy: localhost, 127.0.0.1, +NO_PROXY: localhost, 127.0.0.1, +``` + +**Step 4** - Add `.firebaseio.com` to the allowed-sites of the proxy server + +**Step 5** - Exec into the `dind` pod and run `ifconfig` + +If the MTU value for `docker0` is higher than the MTU value of `eth0` (sometimes the `docker0` MTU is 1500, while `eth0` MTU is 1440) - you need to change this, the `docker0` MTU should be lower than `eth0` MTU + +To fix this, edit the configmap in the codefresh-runtime namespace: + +```shell +kubectl edit cm codefresh-dind-config -n codefresh-runtime +``` + +And add this after one of the commas: +`\"mtu\":1440,` + +### Installing on Rancher RKE 2.X + +#### Step 1 - Configure the kubelet to work with the runner's StorageClass + +The runner's default StorageClass creates the persistent cache volume from local storage on each node. We need to edit the cluster config to allow this. + +In the Rancher UI (v2.5.9 and earlier), drill into the target cluster and then click the Edit Cluster button at the top-right. 
+{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-cluster.png" + url="/images/administration/runner/rancher-cluster.png" + alt="Drill into your cluster and click Edit Cluster on the right" + caption="Drill into your cluster and click Edit Cluster on the right" + max-width="100%" + %} + +In Rancher v2.6+ with the updated UI, open the Cluster Management in the left panel, then click the three-dot menu near the corresponding cluster and select 'Edit Config'. +{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-cluster-2.png" + url="/images/administration/runner/rancher-cluster-2.png" + alt="Click Edit Cluster on the right in your cluster list" + caption="Click Edit Cluster on the right in your cluster list" + max-width="100%" + %} + +On the edit cluster page, scroll down to the Cluster Options section and click its **Edit as YAML** button +{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-edit-as-yaml.png" + url="/images/administration/runner/rancher-edit-as-yaml.png" + alt="Cluster Options -> Edit as YAML" + caption="Cluster Options -> Edit as YAML" + max-width="100%" + %} +Edit the YAML to include an extra mount in the kubelet service: + +```yaml +rancher_kubernetes_engine_config: + ... + services: + ... + kubelet: + extra_binds: + - '/var/lib/codefresh:/var/lib/codefresh:rshared' +``` + +{% include image.html + lightbox="true" + file="/images/administration/runner/rancher-kublet.png" + url="/images/administration/runner/rancher-kublet.png" + alt="Add volume to rancher_kubernetes_engine_config.services.kublet.extra_binds" + caption="Add volume to rancher_kubernetes_engine_config.services.kublet.extra_binds" + max-width="100%" + %} + +#### Step 2 - Make sure your kubeconfig user is a ClusterAdmin + +The user in your kubeconfig must be a cluster admin in order to install the runner. If you plan to have your pipelines connect to this cluster as a cluster admin, then you can go ahead and create a Codefresh user for this purpose in the Rancher UI with a **non-expiring** kubeconfig token. This is the easiest way to do the installation. + +However, if you want your pipelines to connect to this cluster with less privileges, then you can use your personal user account with Cluster Admin privileges for the installation, and then we'll create a Codefresh account with lesser privileges later (in Step 5). In that case, you can now move on to Step 3. 
+ +Follow these steps to create a Codefresh user with Cluster Admin rights, from the Rancher UI: + +* Click Security at the top, and then choose Users + {% include image.html lightbox="true" file="/images/administration/runner/rancher-security.png" url="/images/administration/runner/rancher-security.png" alt="Create a cluster admin user for Codefresh" caption="Create a cluster admin ser for Codefresh" max-width="100%" %} +* Click the Add User button, and under Global Permissions check the box for **Restricted Administrstor** +* Log out of the Rancher UI, and then log back in as the new user +* Click your user icon at the top-right, and then choose **API & Keys** +* Click the **Add Key** button and create a kubeconfig token with Expires set to Never +* Copy the Bearer Token field (combines Access Key and Secret Key) +* Edit your kubeconfig and put the Bearer Token you copied in the `token` field of your user + +#### Step 3 - Install the Runner + +If you've created your kubeconfig from the Rancher UI, then it will contain an API endpoint that is not reachable internally, from within the cluster. To work around this, we need to tell the runner to instead use Kubernetes' generic internal API endpoint. Also, if you didn't create a Codefresh user in step 2 and your kubeconfig contains your personal user account, then you should also add the `--skip-cluster-integration` option. + +Install the runner with a Codefresh user (ClusterAdmin, non-expiring token): + +```shell +codefresh runner init \ + --set-value KubernetesHost=https://kubernetes.default.svc.cluster.local +``` + +Or install the runner with your personal user account: + +```shell +codefresh runner init \ + --set-value KubernetesHost=https://kubernetes.default.svc.cluster.local \ + --skip-cluster-integration +``` + +The wizard will then ask you some basic questions. + +#### Step 4 - Update the runner's Docker MTU + +By default, RKE nodes use the [Canal CNI](https://rancher.com/docs/rancher/v2.x/en/faq/networking/cni-providers/#canal), which combines elements of Flannel and Calico, and uses VXLAN encapsulation. This VXLAN encapsulation has a 50-byte overhead, thus reducing the MTU of its virtual interfaces from the standard 1500 to 1450. For example, when running `ifconfig` on an RKE 2.5.5 node, you might see several interfaces like this. Note the `MTU:1450`. + +```shell +cali0f8ac592086 Link encap:Ethernet HWaddr ee:ee:ee:ee:ee:ee + inet6 addr: fe80::ecee:eeff:feee:eeee/64 Scope:Link + UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1 + RX packets:11106 errors:0 dropped:0 overruns:0 frame:0 + TX packets:10908 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:0 + RX bytes:922373 (922.3 KB) TX bytes:9825590 (9.8 MB) +``` + +We must reduce the Docker MTU used by the runner's Docker in Docker (dind) pods to fit within this lower MTU. This is stored in a configmap in the namespace where the runner is installed. Assuming that you installed the runner into the `codefresh` namespace, you would edit the configmap like this: + +```shell +kubectl edit cm codefresh-dind-config -n codefresh +``` + +In the editor, update the **daemon.json** field - add `,\"mtu\":1440` just before the last curley brace. 
+ {% include image.html + lightbox="true" + file="/images/administration/runner/rancher-mtu.png" + url="/images/administration/runner/rancher-mtu.png" + alt="Update the runner's Docker MTU" + caption="Update the runner's Docker MTU" + max-width="100%" + %} + +#### Step 5 - Create the Cluster Integration + +If you created a user in Step 2 and used it to install the runner in Step 3, then you can skip this step - your installation is complete! + +However, if you installed the runner with the `--skip-cluster-integration` option then you should follow the documentaion to [Add a Rancher Cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/#adding-a-rancher-cluster) to your Kubernetes Integrations. + +Once complete, you can go to the Codefresh UI and run a pipeline on the new runtime, including steps that deploy to the Kubernetes Integration. + +#### Troubleshooting TLS Errors + +Depending on your Rancher configuration, you may need to allow insecure HTTPS/TLS connections. You can do this by adding an environment variable to the runner deployment. + +Assuming that you installed the runner into the `codefresh` namespace, you would edit the runner deployment like this: + +```shell +kubectl edit deploy runner -n codefresh +``` + +In the editor, add this environment variable under spec.containers.env[]: + +```yaml +- name: NODE_TLS_REJECT_UNAUTHORIZED + value: "0" +``` + +### Installing on Google Kubernetes Engine + +If you are installing Codefresh runner on the Kubernetes cluster on [GKE](https://cloud.google.com/kubernetes-engine/) + +* make sure your user has `Kubernetes Engine Cluster Admin` role in google console and +* bind your user with `cluster-admin` Kubernetes cluster role. + +```shell +kubectl create clusterrolebinding cluster-admin-binding \ + --clusterrole cluster-admin \ + --user $(gcloud config get-value account) +``` + + +#### Storage options on GKE + +**Local SSD** + +If you want to use *LocalSSD* in GKE: + +*Prerequisites:* [GKE cluster with local SSD](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/local-ssd) + +Install Runner with the Wizard: + +```shell +codefresh runner init [options] --set-value=Storage.LocalVolumeParentDir=/mnt/disks/ssd0/codefresh-volumes \ + --build-node-selector=cloud.google.com/gke-local-ssd=true +``` + +Or with `values-example.yaml` values file: + +```yaml +... +### Storage parameters example for gke-local-ssd + Storage: + Backend: local + LocalVolumeParentDir: /mnt/disks/ssd0/codefresh-volumes + NodeSelector: cloud.google.com/gke-local-ssd=true +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + cloud.google.com/gke-local-ssd: "true" +... 
+``` +```shell +codefresh runner init [options] --values values-example.yaml +``` + +To configure existing Runner with Local SSDs follow this article: + +[How-to: Configuring an existing Runtime Environment with Local SSDs (GKE only)](https://support.codefresh.io/hc/en-us/articles/360016652920-How-to-Configuring-an-existing-Runtime-Environment-with-Local-SSDs-GKE-only-) + + +**GCE Disks** + +If you want to use *GCE Disks*: + +*Prerequisites:* volume provisioner (dind-volume-provisioner) should have permissions to create/delete/get GCE disks + +There are 3 options to provide cloud credentials: + +* run `dind-volume-provisioner-runner` pod on a node with IAM role which is allowed to create/delete/get GCE disks +* create Google Service Account with `ComputeEngine.StorageAdmin` role, download its key in JSON format and pass it to `codefresh runner init` with `--set-file=Storage.GooogleServiceAccount=/path/to/google-service-account.json` +* use [Google Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity) to assign IAM role to `volume-provisioner-runner` service account + +Notice that builds will be running in a single availability zone, so you must specify AvailabilityZone parameters. + + +##### Runner installation with GCE Disks (Google SA JSON key) + +Using the Wizard: + +```shell +codefresh runner init [options] \ + --set-value=Storage.Backend=gcedisk \ + --set-value=Storage.AvailabilityZone=us-central1-c \ + --kube-node-selector=topology.kubernetes.io/zone=us-central1-c \ + --build-node-selector=topology.kubernetes.io/zone=us-central1-c \ + --set-file=Storage.GoogleServiceAccount=/path/to/google-service-account.json +``` + +Using the values `values-example.yaml` file: +```yaml +... +### Storage parameter example for GCE disks + Storage: + Backend: gcedisk + AvailabilityZone: us-central1-c + GoogleServiceAccount: > #serviceAccount.json content + { + "type": "service_account", + "project_id": "...", + "private_key_id": "...", + "private_key": "...", + "client_email": "...", + "client_id": "...", + "auth_uri": "...", + "token_uri": "...", + "auth_provider_x509_cert_url": "...", + "client_x509_cert_url": "..." + } + NodeSelector: topology.kubernetes.io/zone=us-central1-c +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + topology.kubernetes.io/zone: us-central1-c +... +``` +```shell +codefresh runner init [options] --values values-example.yaml +``` + + +##### Runner installation with GCE Disks (Workload Identity with IAM role) + +Using the values `values-example.yaml` file: + +```yaml +... +### Storage parameter example for GCE disks + Storage: + Backend: gcedisk + AvailabilityZone: us-central1-c + VolumeProvisioner: + ServiceAccount: + Annotations: #annotation to the volume-provisioner service account, using the email address of the Google service account + iam.gke.io/gcp-service-account: @.iam.gserviceaccount.com + NodeSelector: topology.kubernetes.io/zone=us-central1-c +... + Runtime: + NodeSelector: # dind and engine pods node-selector (--build-node-selector) + topology.kubernetes.io/zone: us-central1-c +... 
+```
+```shell
+codefresh runner init [options] --values values-example.yaml
+```
+
+Create the binding between the Kubernetes service account and the Google service account:
+
+```shell
+export K8S_NAMESPACE=codefresh
+export KSA_NAME=volume-provisioner-runner
+export GSA_NAME=
+export PROJECT_ID=
+
+gcloud iam service-accounts add-iam-policy-binding \
+  --role roles/iam.workloadIdentityUser \
+  --member "serviceAccount:${PROJECT_ID}.svc.id.goog[${K8S_NAMESPACE}/${KSA_NAME}]" \
+  ${GSA_NAME}@${PROJECT_ID}.iam.gserviceaccount.com
+```
+
+To configure an existing Runner with GCE disks, follow this article:
+
+[How-to: Configuring an existing Runtime Environment with GCE disks](https://support.codefresh.io/hc/en-us/articles/360016652900-How-to-Configuring-an-existing-Runtime-Environment-with-GCE-disks)
+
+
+##### Using multiple Availability Zones
+
+Currently, to support effective caching with GCE disks, the builds/pods need to be scheduled in a single AZ (this is more related to a GCP limitation than a Codefresh runner issue).
+
+If you have Kubernetes nodes running in multiple Availability Zones and wish to use the Codefresh runner, we suggest the following:
+
+**Option A** - Provision a new Kubernetes cluster that runs in a single AZ only. The cluster should be dedicated to the Codefresh runner. This is the preferred solution, as it avoids extra complexity.
+
+**Option B** - Install the Codefresh runner in your multi-zone cluster and let it run in the default Node Pool. In this case, you must specify `--build-node-selector=` (for example: `--build-node-selector=topology.kubernetes.io/zone=us-central1-c`), or simply modify the Runtime environment as below:
+
+```shell
+codefresh get re $RUNTIME_NAME -o yaml > re.yaml
+```
+
+Edit the yaml:
+
+```yaml
+version: 2
+metadata:
+  ...
+runtimeScheduler:
+  cluster:
+    nodeSelector: #schedule engine pod onto a node whose labels match the nodeSelector
+      topology.kubernetes.io/zone: us-central1-c
+  ...
+dockerDaemonScheduler:
+  cluster:
+    nodeSelector: #schedule dind pod onto a node whose labels match the nodeSelector
+      topology.kubernetes.io/zone: us-central1-c
+  ...
+  pvcs:
+    dind:
+      ...
+```
+
+Apply changes with:
+
+```shell
+codefresh patch re -f re.yaml
+```
+
+**Option C** - Like option B, but with a dedicated Node Pool.
+
+**Option D** - Have two separate Codefresh runner Runtimes, one for zone A, another for zone B, and so on. This technically works, but it requires you to manually set the RE for every pipeline that should not use the default Codefresh runner RE, in order to distribute builds across the Codefresh runner REs.
+
+For example, if Venona-zoneA is the default RE, then for the pipelines that you want to run in Venona-zoneB you'll need to modify their RE settings and explicitly set Venona-zoneB as the one to use.
+
+[Regional Persistent Disks](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/regional-pd) are not currently supported by the Codefresh runner.
+
+
+### Installing on AKS
+
+**Azure Disks**
+
+*Prerequisite:* the volume provisioner (`dind-volume-provisioner`) should have permissions to create/delete/get Azure Disks
+
+Minimal IAM role for `dind-volume-provisioner`:
+`dind-volume-provisioner-role.json`
+```json
+{
+    "Name": "CodefreshDindVolumeProvisioner",
+    "Description": "Perform create/delete/get disks",
+    "IsCustom": true,
+    "Actions": [
+        "Microsoft.Compute/disks/read",
+        "Microsoft.Compute/disks/write",
+        "Microsoft.Compute/disks/delete"
+    ],
+    "AssignableScopes": ["/subscriptions/"]
+}
+```
+
+If you use AKS with managed [identities for node group](https://docs.microsoft.com/en-us/azure/aks/use-managed-identity), you can run the script below to assign the `CodefreshDindVolumeProvisioner` role to the AKS node identity:
+
+```shell
+export ROLE_DEFINITION_FILE=dind-volume-provisioner-role.json
+export SUBSCRIPTION_ID=$(az account show --query "id" | xargs echo)
+export RESOURCE_GROUP=codefresh-rt1
+export AKS_NAME=codefresh-rt1
+export LOCATION=$(az aks show -g $RESOURCE_GROUP -n $AKS_NAME --query location | xargs echo)
+export NODES_RESOURCE_GROUP=MC_${RESOURCE_GROUP}_${AKS_NAME}_${LOCATION}
+export NODE_SERVICE_PRINCIPAL=$(az aks show -g $RESOURCE_GROUP -n $AKS_NAME --query identityProfile.kubeletidentity.objectId | xargs echo)
+
+az role definition create --role-definition @${ROLE_DEFINITION_FILE}
+az role assignment create --assignee $NODE_SERVICE_PRINCIPAL --scope /subscriptions/$SUBSCRIPTION_ID/resourceGroups/$NODES_RESOURCE_GROUP --role CodefreshDindVolumeProvisioner
+```
+
+Now install the Codefresh Runner with the CLI wizard:
+```shell
+codefresh runner init --set-value Storage.Backend=azuredisk --set-value Storage.VolumeProvisioner.MountAzureJson=true
+```
+Or using [values-example.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml):
+```yaml
+Storage:
+  Backend: azuredisk
+  VolumeProvisioner:
+    MountAzureJson: true
+```
+```shell
+codefresh runner init --values values-example.yaml
+```
+Or with the helm chart [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/charts/cf-runtime/values.yaml):
+```yaml
+storage:
+  backend: azuredisk
+  azuredisk:
+    skuName: Premium_LRS
+
+volumeProvisioner:
+  mountAzureJson: true
+```
+```shell
+helm install cf-runtime cf-runtime/cf-runtime -f ./generated_values.yaml -f values.yaml --create-namespace --namespace codefresh
+```
+
+
+### Internal Registry Mirror
+
+You can configure your Codefresh Runner to use an internal registry as a mirror for any container images that are mentioned in your pipelines.
+
+First, set up an internal registry as described in [https://docs.docker.com/registry/recipes/mirror/](https://docs.docker.com/registry/recipes/mirror/).
+
+Then locate the `codefresh-dind-config` config map in the namespace that houses the runner and edit it.
+
+```shell
+kubectl -n codefresh edit configmap codefresh-dind-config
+```
+
+Change the `data` field from:
+
+```yaml
+data:
+  daemon.json: "{\n \"hosts\": [ \"unix:///var/run/docker.sock\",\n \"tcp://0.0.0.0:1300\"],\n
+    \ \"storage-driver\": \"overlay2\",\n \"tlsverify\": true, \n \"tls\": true,\n
+    \ \"tlscacert\": \"/etc/ssl/cf-client/ca.pem\",\n \"tlscert\": \"/etc/ssl/cf/server-cert.pem\",\n
+    \ \"tlskey\": \"/etc/ssl/cf/server-key.pem\",\n \"insecure-registries\" : [\"192.168.99.100:5000\"],\n
+    \ \"metrics-addr\" : \"0.0.0.0:9323\",\n \"experimental\" : true\n}\n"
+```
+
+to
+
+```yaml
+data:
+  daemon.json: "{\n \"hosts\": [ \"unix:///var/run/docker.sock\",\n \"tcp://0.0.0.0:1300\"],\n
+    \ \"storage-driver\": \"overlay2\",\n \"tlsverify\": true, \n \"tls\": true,\n
+    \ \"tlscacert\": \"/etc/ssl/cf-client/ca.pem\",\n \"tlscert\": \"/etc/ssl/cf/server-cert.pem\",\n
+    \ \"tlskey\": \"/etc/ssl/cf/server-key.pem\",\n \"insecure-registries\" : [\"192.168.99.100:5000\"],\n
+    \ \"registry-mirrors\": [ \"https://\" ], \n
+    \ \"metrics-addr\" : \"0.0.0.0:9323\",\n \"experimental\" : true\n}\n"
+```
+
+This adds the line `\ \"registry-mirrors\": [ \"https://\" ], \n`, which contains a single registry to use as a mirror. Save and quit by typing `:wq`.
+
+Now any container image that is used in your pipeline and isn't fully qualified will be pulled through the Docker registry that is configured as a mirror.
+
+
+### Installing the monitoring component
+
+If your cluster is located [behind the firewall](https://codefresh.io/docs/docs/administration/behind-the-firewall/), you might want to use the runner monitoring component to send information about cluster resources to Codefresh, for example, to the [Kubernetes](https://g.codefresh.io/kubernetes/services/) and [Helm Releases](https://g.codefresh.io/helm/releases/releasesNew/) dashboards.
+
+To install the monitoring component, use the `--install-monitor` flag in the `runner init` command:
+
+```shell
+codefresh runner init --install-monitor
+```
+
+Note that the monitoring component is not installed if you use `--install-monitor` together with the `--skip-cluster-integration` flag. If you want to skip adding the cluster integration during the runner installation, but still want cluster resources reported to the Codefresh dashboards, install the monitoring component separately:
+
+```shell
+codefresh install monitor --kube-context-name --kube-namespace --cluster-id --token
+```
+
+
+
+## Full runtime environment specification
+
+The following section explains the runtime environment specification and the options for modifying it. Note that there are additional, hidden fields autogenerated by Codefresh that complete a full runtime spec.
You can't directly see or edit them (unless you run your own [Codefresh On-Premises Installation]({{site.baseurl}}/docs/administration/codefresh-on-prem/)).
+
+
+To get a list of all available runtimes, execute:
+```shell
+codefresh get runtime-environments
+#or
+codefresh get re
+```
+
+Choose the runtime that you want to inspect or modify and get its yaml/json representation:
+```shell
+codefresh get re my-eks-cluster/codefresh -o yaml > runtime.yaml
+#or
+codefresh get re my-eks-cluster/codefresh -o json > runtime.json
+```
+
+Update your runtime environment with the [patch command](https://codefresh-io.github.io/cli/operate-on-resources/patch/):
+```shell
+codefresh patch re my-eks-cluster/codefresh -f runtime.yaml
+```
+
+Below is an example of the default, basic runtime spec after you've installed the Runner:
+
+{% highlight yaml %}
+{% raw %}
+version: 1
+metadata:
+  ...
+runtimeScheduler:
+  cluster:
+    clusterProvider:
+      accountId: 5f048d85eb107d52b16c53ea
+      selector: my-eks-cluster
+    namespace: codefresh
+    serviceAccount: codefresh-engine
+  annotations: {}
+dockerDaemonScheduler:
+  cluster:
+    clusterProvider:
+      accountId: 5f048d85eb107d52b16c53ea
+      selector: my-eks-cluster
+    namespace: codefresh
+    serviceAccount: codefresh-engine
+  annotations: {}
+  userAccess: true
+  defaultDindResources:
+    requests: ''
+  pvcs:
+    dind:
+      storageClassName: dind-local-volumes-runner-codefresh
+extends:
+  - system/default/hybrid/k8s_low_limits
+description: '...'
+accountId: 5f048d85eb107d52b16c53ea
+{% endraw %}
+{% endhighlight %}
+
+### Top level fields
+
+{: .table .table-bordered .table-hover}
+| Field name | Type | Value |
+| -------------- |-------------------------| -------------------------|
+| `version` | string | Runtime environment version |
+| `metadata` | object | Meta-information |
+| `runtimeScheduler` | object | Engine pod definition |
+| `dockerDaemonScheduler` | object | Dind pod definition |
+| `extends` | array | System field (links to full runtime spec from Codefresh API) |
+| `description` | string | Runtime environment description (k8s context name and namespace) |
+| `accountId` | string | Account to which this runtime belongs |
+| `appProxy` | object | Optional field for [app-proxy]({{site.baseurl}}/docs/administration/codefresh-runner/#optional-installation-of-the-app-proxy) |
+
+### runtimeScheduler fields (engine)
+
+{: .table .table-bordered .table-hover}
+| Field name | Type | Value |
+| -------------- |-------------------------| -------------------------|
+| `image` | string | Override default engine image |
+| `imagePullPolicy` | string | Override image pull policy (default `IfNotPresent`) |
+| `type` | string | `KubernetesPod` |
+| `envVars` | object | Override or add environment variables passed into the engine pod |
+| `userEnvVars` | object | Add external env var(s) to the pipeline.
See [Custom Global Environment Variables]({{site.baseurl}}/docs/administration/codefresh-runner/#custom-global-environment-variables) | +| `cluster` | object | k8s related information (`namespace`, `serviceAccount`, `nodeSelector`) | +| `resources` | object | Specify non-default `requests` and `limits` for engine pod | +| `tolerations` | array | Add tolerations to engine pod | +| `annotations` | object | Add custom annotations to engine pod (empty by default `{}`) | +| `labels` | object | Add custom labels to engine pod (empty by default `{}`) | +| `dnsPolicy` | string | Engine pod's [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) | +| `dnsConfig` | object | Engine pod's [DNS config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) | + +`runtimeScheduler` example: +{% highlight yaml %} +{% raw %} +runtimeScheduler: + imagePullPolicy: Always + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + nodeSelector: #schedule engine pod onto a node whose labels match the nodeSelector + node-type: engine + namespace: codefresh + serviceAccount: codefresh-engine + annotations: {} + labels: + spotinst.io/restrict-scale-down: "true" #optional label to prevent node scaling down when the runner is deployed on spot instances using spot.io + envVars: + NODE_TLS_REJECT_UNAUTHORIZED: '0' #disable certificate validation for TLS connections (e.g. to g.codefresh.io) + METRICS_PROMETHEUS_ENABLED: 'true' #enable /metrics on engine pod + DEBUGGER_TIMEOUT: '30' #debug mode timeout duration (in minutes) + userEnvVars: + - name: GITHUB_TOKEN + valueFrom: + secretKeyRef: + name: github-token + key: token + resources: + requests: + cpu: 60m + memory: 500Mi + limits: + cpu: 1000m + memory: 2048Mi + tolerations: + - effect: NoSchedule + key: codefresh.io + operator: Equal + value: engine +{% endraw %} +{% endhighlight %} + +### dockerDaemonScheduler fields (dind) + +| Field name | Type | Value | +| -------------- |-------------------------| -------------------------| +| `dindImage` | string | Override default dind image | +| `type` | string | `DindPodPvc` | +| `envVars` | object | Override or add environment variables passed into the dind pod. See [IN-DIND cleaner]({{site.baseurl}}/docs/administration/codefresh-runner/#cleaners) | +| `userVolumeMounts` with `userVolumes` | object | Add volume mounts to the pipeline See [Custom Volume Mounts]({{site.baseurl}}/docs/administration/codefresh-runner/#custom-volume-mounts) | +| `cluster` | object | k8s related information (`namespace`, `serviceAccount`, `nodeSelector`) | +| `defaultDindResources` | object | Override `requests` and `limits` for dind pod (defaults are `cpu: 400m` and `memory:800Mi` ) | +| `tolerations` | array | Add tolerations to dind pod | +| `annotations` | object | Add custom annotations to dind pod (empty by default `{}`) | +| `labels` | object | Add custom labels to dind pod (empty by default `{}`) | +| `pvc` | object | Override default storage configuration for PersistentVolumeClaim (PVC) with `storageClassName`, `volumeSize`, `reuseVolumeSelector`. 
See [Volume Reusage Policy]({{site.baseurl}}/docs/administration/codefresh-runner/#volume-reusage-policy) | +| `dnsPolicy` | string | Dind pod's [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) | +| `dnsConfig` | object | Dind pod's [DNS config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config) | + +`dockerDaemonScheduler` example: +{% highlight yaml %} +{% raw %} +dockerDaemonScheduler: + cluster: + clusterProvider: + accountId: 5f048d85eb107d52b16c53ea + selector: my-eks-cluster + nodeSelector: #schedule dind pod onto a node whose labels match the nodeSelector + node-type: dind + namespace: codefresh + serviceAccount: codefresh-engine + annotations: {} + labels: + spotinst.io/restrict-scale-down: "true" #optional label to prevent node scaling down when the runner is deployed on spot instances using spot.io + userAccess: true + defaultDindResources: + requests: '' + limits: + cpu: 1000m + memory: 2048Mi + userVolumeMounts: + my-cert: + name: cert + mountPath: /etc/ssl/cert + readOnly: true + userVolumes: + my-cert: + name: cert + secret: + secretName: tls-secret + pvcs: + dind: + storageClassName: dind-local-volumes-runner-codefresh + volumeSize: 30Gi + reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id' + tolerations: + - key: codefresh.io + operator: Equal + value: dinds + effect: NoSchedule +{% endraw %} +{% endhighlight %} + +### Custom Global Environment Variables +You can add your own environment variables in the runtime environment, so that all pipeline steps will have access to it. A typical example would be a shared secret that you want to pass to the pipeline. + +Under the `runtimeScheduler` block you can add an additional element with named `userEnvVars` that follows the same syntax as [secret/environment variables](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables). + +`runtime.yaml` +{% highlight yaml %} +{% raw %} +... +runtimeScheduler: + userEnvVars: + - name: GITHUB_TOKEN + valueFrom: + secretKeyRef: + name: github-token + key: token +... +{% endraw %} +{% endhighlight %} + +### Custom Volume Mounts +You can add your own volume mounts in the runtime environment, so that all pipeline steps have access to the same set of external files. A typical example of this scenario is when you want to make a set of SSL certificates available to all your pipelines. Rather than manually download the certificates in each pipeline, you can provide them centrally on the runtime level. + +Under the `dockerDaemonScheduler` block you can add two additional elements with names `userVolumeMounts` and `userVolumes` (they follow the same syntax as normal k8s `volumes` and `volumeMounts`) and define your own global volumes. + +`runtime.yaml` +{% highlight yaml %} +{% raw %} +... +dockerDaemonScheduler: + userVolumeMounts: + my-cert: + name: cert + mountPath: /etc/ssl/cert + readOnly: true + userVolumes: + my-cert: + name: cert + secret: + secretName: tls-secret +... +{% endraw %} +{% endhighlight %} + +### Debug Timeout Duration + +The default timeout for [debug mode]({{site.baseurl}}/docs/configure-ci-cd-pipeline/debugging-pipelines/) is 14 minutes, and even if the user is actively working, it is still 14 minutes. To change the duration of the debugger, you will need to update your Runtime Spec for the runtime you would like to change. To change the default duration, you will need to add `DEBUGGER_TIMEOUT` to the environment variable. 
The value you pass is a string that defines the timeout in minutes. For example, '30' sets the timeout to 30 minutes.
+
+Under `.runtimeScheduler`, add an `envVars` section, then add `DEBUGGER_TIMEOUT` under `envVars` with the value you want.
+
+```yaml
+...
+runtimeScheduler:
+  envVars:
+    DEBUGGER_TIMEOUT: '30'
+...
+```
+
+### Volume Reusage Policy
+
+How volumes are reused depends on the volume selector configuration.
+The `reuseVolumeSelector` option is configurable in the runtime environment spec.
+
+The following options are available:
+
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName'` - the PV can be used by **any** pipeline in your account (this is the **default** volume selector).
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id'` - the PV can be used only by a **single pipeline**.
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id,io.codefresh.branch_name'` - the PV can be used only by a **single pipeline AND a single branch**.
+* `reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id,trigger'` - the PV can be used only by a **single pipeline AND a single trigger**.
+
+For the `codefresh-app,io.codefresh.accountName` approach:
+
+* Benefit: fewer PVs --> lower cost (since any PV can be used by any pipeline, the cluster needs to keep fewer PVs in its pool for Codefresh)
+* Downside: since a PV can be used by any pipeline, PVs can contain assets and info from different pipelines, reducing the probability of cache hits.
+
+For the `codefresh-app,io.codefresh.accountName,pipeline_id` approach:
+
+* Benefit: higher probability of cache hits (no "spam" from other pipelines)
+* Downside: more PVs to keep (higher cost)
+
+
+To change the volume selector, get the runtime yaml spec, and under the `dockerDaemonScheduler.pvcs.dind` block specify `reuseVolumeSelector`:
+
+```yaml
+  pvcs:
+    dind:
+      volumeSize: 30Gi
+      reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id'
+```
+
+## Runtime Cleaners
+
+### Key points
+
+* Codefresh pipelines require disk space for:
+  * [Pipeline Shared Volume](https://codefresh.io/docs/docs/yaml-examples/examples/shared-volumes-between-builds/) (`/codefresh/volume`, implemented as a [docker volume](https://docs.docker.com/storage/volumes/))
+  * Docker containers - running and stopped
+  * Docker images and cached layers
+* To improve performance, the `volume-provisioner` can provision a previously used disk, with the docker images and pipeline volume from earlier builds. This improves performance by reusing the docker cache and decreasing the I/O rate.
+* The least recently used docker images and volumes should be cleaned to avoid out-of-space errors (see the example check after this list).
+* There are several places where pipeline volume cleanup is required, so there are several kinds of cleaner.
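+As a quick illustration of when cleaners kick in, you can check the current disk and inode usage inside a running dind pod. This is a minimal sketch, not part of the official installation flow: it assumes the default `codefresh` namespace and that Docker data inside the dind pod lives under the standard `/var/lib/docker` path; `<dind-pod-name>` is a placeholder for one of your dind pods.
+
+```shell
+# List the dind pods in the runner namespace (assumed here to be "codefresh")
+kubectl -n codefresh get pods | grep dind
+
+# Check disk and inode usage of the Docker storage inside one dind pod.
+# <dind-pod-name> is a placeholder - use one of the pods listed above.
+kubectl -n codefresh exec <dind-pod-name> -- df -h /var/lib/docker
+kubectl -n codefresh exec <dind-pod-name> -- df -i /var/lib/docker
+```
+
+If usage is consistently close to the thresholds described in the cleaner sections below, consider tuning the corresponding cleaner settings.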
+ +### Cleaners + +* [IN-DIND cleaner](https://github.com/codefresh-io/dind/tree/master/cleaner) - deletes extra docker containers, volumes, images in **dind pod** +* [External volumes cleaner](https://github.com/codefresh-io/runtime-cluster-monitor/blob/master/charts/cf-monitoring/templates/dind-volume-cleanup.yaml) - deletes unused **external** PVs (EBS, GCE/Azure disks) +* [Local volumes cleaner](https://github.com/codefresh-io/dind-volume-utils/blob/master/local-volumes/lv-cleaner.sh) - deletes **local** volumes in case node disk space is close to the threshold + +*** + +#### IN-DIND cleaner + +**Purpose:** Removes unneeded *docker containers, images, volumes* inside kubernetes volume mounted to the dind pod + +**Where it runs:** Running inside each dind pod as script + +**Triggered by:** SIGTERM and also during the run when disk usage (cleaner-agent ) > 90% (configurable) + +**Configured by:** Environment Variables which can be set in Runtime Environment configuration + +**Configuration/Logic:** [README.md](https://github.com/codefresh-io/dind/tree/master/cleaner#readme) + +Override `dockerDaemonScheduler.envVars` on Runtime Environment if necessary (the following are **defaults**): + +```yaml +dockerDaemonScheduler: + envVars: + CLEAN_PERIOD_SECONDS: '21600' # launch clean if last clean was more than CLEAN_PERIOD_SECONDS seconds ago + CLEAN_PERIOD_BUILDS: '5' # launch clean if last clean was more CLEAN_PERIOD_BUILDS builds since last build + IMAGE_RETAIN_PERIOD: '14400' # do not delete docker images if they have events since current_timestamp - IMAGE_RETAIN_PERIOD + VOLUMES_RETAIN_PERIOD: '14400' # do not delete docker volumes if they have events since current_timestamp - VOLUMES_RETAIN_PERIOD + DISK_USAGE_THRESHOLD: '0.8' # launch clean based on current disk usage DISK_USAGE_THRESHOLD + INODES_USAGE_THRESHOLD: '0.8' # launch clean based on current inodes usage INODES_USAGE_THRESHOLD +``` + +*** + +#### External volumes cleaner + +**Purpose:** Removes unused *kubernetes volumes and related backend volumes* + +**Where it runs:** On Runtime Cluster as CronJob +(`kubectl get cronjobs -n codefresh -l app=dind-volume-cleanup`). Installed in case the Runner uses non-local volumes (`Storage.Backend != local`) + +**Triggered by:** CronJob every 10min (configurable), part of [runtime-cluster-monitor](https://github.com/codefresh-io/runtime-cluster-monitor/blob/master/charts/cf-monitoring/templates/dind-volume-cleanup.yaml) and runner deployment + +**Configuration:** + +Set `codefresh.io/volume-retention` annotation on Runtime Environment: + +```yaml +dockerDaemonScheduler: + pvcs: + dind: + storageClassName: dind-ebs-volumes-runner-codefresh + reuseVolumeSelector: 'codefresh-app,io.codefresh.accountName,pipeline_id' + volumeSize: 32Gi + annotations: + codefresh.io/volume-retention: 7d +``` + +Override environment variables for `dind-volume-cleanup` cronjob if necessary: + +* `RETENTION_DAYS` (defaults to 4) +* `MOUNT_MIN` (defaults to 3) +* `PROVISIONED_BY` (defaults to `codefresh.io/dind-volume-provisioner`) + +About *optional* `-m` argument: + +* `dind-volume-cleanup` to clean volumes that were last used more than `RETENTION_DAYS` ago +* `dind-volume-cleanup-m` to clean volumes that were used more than a day ago, but mounted less than `MOUNT_MIN` times + +*** + +#### Local volumes cleaner + +**Purpose:** Deletes local volumes in case node disk space is close to the threshold + +**Where it runs:** On each node on runtime cluster as DaemonSet `dind-lv-monitor`. 
Installed in case the Runner uses local volumes (`Storage.Backend == local`)
+
+**Triggered by:** Starts cleaning if disk space usage or inode usage exceeds the thresholds (configurable)
+
+**Configuration:**
+
+Override environment variables for the `dind-lv-monitor` daemonset if necessary:
+
+* `VOLUME_PARENT_DIR` - default `/var/lib/codefresh/dind-volumes`
+* `KB_USAGE_THRESHOLD` - default 80 (percentage)
+* `INODE_USAGE_THRESHOLD` - default 80
+
+## ARM Builds
+
+With the hybrid runner it's possible to run native ARM64v8 builds.
+
+>**Note:** Running both amd64 and arm64 images within the same pipeline is not possible. We do not support multi-architecture builds: one runtime configuration maps to one architecture. Since a pipeline can map to only one runtime, it can run either amd64 or arm64 builds, but not both within the same pipeline.
+
+The following scenario is an example of how to set up an ARM Runner on an existing EKS cluster:
+
+**Step 1 - Preparing nodes**
+
+Create a new ARM nodegroup:
+
+```shell
+eksctl utils update-coredns --cluster <cluster-name>
+eksctl utils update-kube-proxy --cluster <cluster-name> --approve
+eksctl utils update-aws-node --cluster <cluster-name> --approve
+
+eksctl create nodegroup \
+--cluster <cluster-name> \
+--region <region> \
+--name <arm-nodegroup-name> \
+--node-type <arm-instance-type> \
+--nodes <3>\
+--nodes-min <2>\
+--nodes-max <4>\
+--managed
+```
+
+Check the node status:
+
+```shell
+kubectl get nodes -l kubernetes.io/arch=arm64
+```
+
+It's also recommended to label and taint the required ARM nodes:
+
+```shell
+kubectl taint nodes <node-name> arch=aarch64:NoSchedule
+kubectl label nodes <node-name> arch=arm
+```
+
+**Step 2 - Runner installation**
+
+Use [values.yaml](https://github.com/codefresh-io/venona/blob/release-1.0/venonactl/example/values-example.yaml) to inject `tolerations`, `kube-node-selector`, `build-node-selector` into the Runtime Environment spec.
+
+`values-arm.yaml`
+
+```yaml
+...
+Namespace: codefresh
+
+### NodeSelector --kube-node-selector: controls runner and dind-volume-provisioner pods
+NodeSelector: arch=arm
+
+### Tolerations --tolerations: controls runner, dind-volume-provisioner and dind-lv-monitor
+Tolerations:
+- key: arch
+  operator: Equal
+  value: aarch64
+  effect: NoSchedule
+...
+########################################################
+###              Codefresh Runtime                   ###
+###                                                  ###
+###         configure engine and dind pods           ###
+########################################################
+Runtime:
+### NodeSelector --build-node-selector: controls engine and dind pods
+  NodeSelector:
+    arch: arm
+### Tolerations for engine and dind pods
+  tolerations:
+  - key: arch
+    operator: Equal
+    value: aarch64
+    effect: NoSchedule
+...
+``` + +Install the Runner with: + +```shell +codefresh runner init --values values-arm.yaml --exec-demo-pipeline false --skip-cluster-integration true +``` + +**Step 3 - Post-installation fixes** + +Change `engine` image version in Runtime Environment specification: + +```shell +# get the latest engine ARM64 tag +curl -X GET "https://quay.io/api/v1/repository/codefresh/engine/tag/?limit=100" --silent | jq -r '.tags[].name' | grep "^1.*arm64$" +1.136.1-arm64 +``` + +```shell +# get runtime spec +codefresh get re $RUNTIME_NAME -o yaml > runtime.yaml +``` + +under `runtimeScheduler.image` change image tag: + +```yaml +runtimeScheduler: + image: 'quay.io/codefresh/engine:1.136.1-arm64' +``` + +```shell +# patch runtime spec +codefresh patch re -f runtime.yaml +``` + +For `local` storage patch `dind-lv-monitor-runner` DaemonSet and add `nodeSelector`: + +```shell +kubectl edit ds dind-lv-monitor-runner +``` + +```yaml + spec: + nodeSelector: + arch: arm +``` + +**Step 4 - Run Demo pipeline** + +Run a modified version of the *CF_Runner_Demo* pipeline: + +```yaml +version: '1.0' +stages: + - test +steps: + test: + stage: test + title: test + image: 'arm64v8/alpine' + commands: + - echo hello Codefresh Runner! +``` + +## Troubleshooting + +For troubleshooting refer to the [Knowledge Base](https://support.codefresh.io/hc/en-us/sections/4416999487762-Hybrid-Runner) + +## What to read next + +* [Codefresh installation options]({{site.baseurl}}/docs/installation/installation-options/) +* [Codefresh On-Premises]({{site.baseurl}}/docs/administration/codefresh-on-prem/) +* [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api/) diff --git a/_docs/runtime/git-sources.md b/_docs/installation/git-sources.md similarity index 68% rename from _docs/runtime/git-sources.md rename to _docs/installation/git-sources.md index 2b95dc54..b51913a8 100644 --- a/_docs/runtime/git-sources.md +++ b/_docs/installation/git-sources.md @@ -1,23 +1,23 @@ --- -title: "Add Git Sources to runtimes" +title: "Add Git Sources to GitOps Runtimes" description: "" -group: runtime +group: installation toc: true --- -A Git Source is the equivalent of an Argo CD application that tracks a Git repository and syncs the desired state of the repo to the destination K8s cluster. In addition to application resources, the Git Source can store resources for Codefresh runtimes, and CI/CD entities such as delivery pipelines, Workflow Templates, workflows, and applications. +A Git Source is the equivalent of an Argo CD application that tracks a Git repository and syncs the desired state of the repo to the destination K8s cluster. In addition to application resources, the Git Source can store resources for GitOps Runtimes, and CI/CD entities such as delivery pipelines, Workflow Templates, workflows, and applications. -Provisioning a runtime automatically creates a Git Source that stores resources for the runtime and for the demo CI pipelines that are optionally installed with the runtime. Every Git Source is associated with a Codefresh runtime. A runtime can have one or more Git Sources. You can add Git Sources at any time, to the same or to different runtimes. +Provisioning a Runtime automatically creates a Git Source that stores resources for the Runtime and for the demo CI pipelines that are optionally installed with the Runtime. Every Git Source is associated with a Runtime. You can add Git Sources at any time, to the same or to different Runtimes. 
-Once you create a Git Source for a runtime, you can store resources for CI/CD entities associated with that runtime. For example, when creating pipelines or applications, you can select the Git Source to which to store manifest definitions. +Once you create a Git Source for a Runtime, you can store resources for CI/CD entities associated with it. For example, when creating pipelines or applications, you can select the Git Source to which to store manifest definitions. ### View Git Sources and definitions Drill down on a runtime in List View to see its Git Sources. -1. In the Codefresh UI, go to the [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"} page. -1. From the **List View** (the default), select a runtime name, and then select the **Git Sources** tab. +1. In the Codefresh UI, go to the [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"} page. +1. From the **List View** (the default), select a Runtime name, and then select the **Git Sources** tab. {% include image.html @@ -34,12 +34,12 @@ Drill down on a runtime in List View to see its Git Sources. 1. To see the definitions for the Git Source, select the three dots at the end of the row. ### Create a Git Source -Create Git Sources for any provisioned runtime. The Git Sources are available to store resources for pipelines or applications when you create them. +Create Git Sources for any provisioned Runtime. The Git Sources are available to store resources for pipelines or applications when you create them. >Make sure you are in the List View to create Git Sources. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. -1. In the List View, select the runtime for which to add a Git Source, and then select the **Git Sources** tab. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. +1. In the List View, select the Runtime for which to add a Git Source, and then select the **Git Sources** tab. 1. Select **Create Git Sources**, and in the Create Git Source panel, define the definitions for the Git Source: {% include @@ -56,7 +56,7 @@ Create Git Sources for any provisioned runtime. The Git Sources are available t * **Source**: The Git repo with the desired state, tracked by the Git Source, and synced to the destination cluster. * **Repository**: Mandatory. The URL to the Git repo. * **Branch**: Optional. The specific branch within the repo to track. - * **Path**: Optional. The specific path within the repo, and branch, if one is specified, to track. + * **Path**: Optional. The specific path within the repo, and branch if one is specified, to track. * **Destination**: The destination cluster with the actual state to which to apply the changes from the **Source**. * **Namespace**: The namespace in the destination cluster to which to sync the changes. @@ -73,8 +73,8 @@ Create Git Sources for any provisioned runtime. The Git Sources are available t Edit an existing Git Source by changing the source and destination definitions. > You cannot change the name of the Git Source. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. -1. From the **List View** (the default), select the runtime with the Git Source, and then select the **Git Sources** tab. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. +1. 
From the **List View** (the default), select the Runtime with the Git Source, and then select the **Git Sources** tab. 1. In the row with the Git Source to edit, select the three dots, and then select **Edit** in the panel that appears. {% include @@ -90,12 +90,12 @@ Edit an existing Git Source by changing the source and destination definitions. 1. Change the **Source** and **Destination** definitions for the Git Source, and select **Save**. ### View/download logs for a Git Source -View online logs for any Git Source associated with a runtime, and if needed, download the log file for offline viewing and analysis. -Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events from the application launch to the date and time of download. +View online logs for any Git Source associated with a Runtime, and if needed, download the log file for offline viewing and analysis. +Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events, from the application launch to the date and time of download. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. -1. From the **List View** (the default), select the runtime with the Git Source, and then select the **Git Sources** tab. -1. In the row with the Git Source foe which to view/download logs, select the three dots, and then select **View Logs**. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes**){:target="\_blank"}. +1. From the **List View** (the default), select the Runtime with the Git Source, and then select the **Git Sources** tab. +1. In the row with the Git Source for which to view/download logs, select the three dots, and then select **View Logs**. {% include image.html @@ -127,6 +127,7 @@ Online logs show up to 1000 of the most recent events (lines), updated in real t The file is downloaded with `.log` extension. ### What to read next -[Manage runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Recover runtimes]({{site.baseurl}}/docs/runtime/runtime-recovery/) +[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/) +[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) + diff --git a/_docs/runtime/hosted-runtime.md b/_docs/installation/hosted-runtime.md similarity index 66% rename from _docs/runtime/hosted-runtime.md rename to _docs/installation/hosted-runtime.md index 0a08ba3b..cfc64c7e 100644 --- a/_docs/runtime/hosted-runtime.md +++ b/_docs/installation/hosted-runtime.md @@ -1,18 +1,28 @@ --- -title: "Set up a hosted runtime environment" -description: "" -group: runtime +title: "Hosted GitOps Runtime setup" +description: "Provision Hosted GitOps environment" +group: installation toc: true --- -If you have Codefresh's Hosted GitOps, set up your hosted environment, and you are all ready to leverage extensive CD Ops capabilities. -Read about [Hosted GitOps]({{site.baseurl}}/docs/incubation/intro-hosted-runtime/). +Set up your hosted environment with the Hosted GitOps Runtime to leverage extensive CD capabilities. + -### Where to start with Hosted GitOps -If you have not provisioned a hosted runtime, Codefresh presents you with the setup instructions in the **Home** dashboard. 
+## System requirements for Hosted GitOps Runtimes +{: .table .table-bordered .table-hover} +| Item | Requirement | +| -------------- | -------------- | +|Kubernetes cluster | Server version 1.18 and higher to which to deploy applications| +|Git provider | {::nomarkdown}
        • GitHub
        • Bitbucket Cloud
        {:/}| + + +## Where to start with Hosted GitOps Runtimes +If you have not provisioned a Hosted GitOps Runtime, Codefresh presents you with the setup instructions in the **Home** dashboard. + + * In the Codefresh UI, go to Codefresh [Home](https://g.codefresh.io/2.0/?time=LAST_7_DAYS){:target="\_blank"}. Codefresh guides you through the three-step setup, as described below. @@ -27,18 +37,18 @@ caption="Hosted GitOps setup" max-width="80%" %} - >You can provision a single hosted runtime for your Codefresh account. + >You can provision a single Hosted GitOps Runtime per Codefresh account. -### 1. Provision hosted runtime -Start installing the hosted runtime with a single-click. Codefresh completes the installation without any further intervention on your part. -The hosted runtime is provisioned on the Codefresh cluster, and completely managed by Codefresh with automatic version and security upgrades. +## Step 1: Install Hosted GitOps Runtime +Start installing the Hosted GitOps Runtime with a single-click. Codefresh completes the installation without any further intervention on your part. +The Hosted GitOps Runtime is provisioned on the Codefresh cluster, and completely managed by Codefresh with automatic version and security upgrades. 1. Do one of the following: - * To set up Hosted GitOps later, click **Install later**, and continue from step _2_. + * To set up Hosted GitOps Runtime later, click **Install later**, and continue from step _2_. * To start setup, click **Install**, and continue from step _3_. {% include @@ -46,16 +56,16 @@ image.html lightbox="true" file="/images/runtime/hosted-installing.png" url="/images/runtime/hosted-installing.png" -alt="Step 1: Installing hosted runtime" -caption="Step 1: Installing hosted runtime" +alt="Step 1: Installing Hosted GitOps Runtime" +caption="Step 1: Installing Hosted GitOps Runtime" max-width="80%" %} {:start="2"} 1. Do the following: - * In the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and click **+ Add Runtimes**. - * Select **Hosted Runtime** and click **Add**. - >An account can be provisioned with a single hosted runtime. If you have already provisioned a hosted runtime for your account, the Hosted Runtime option is disabled. + * In the Codefresh UI, go to [**GitOps Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and click **+ Add Runtimes**. + * Select **Hosted GitOps Runtime** and click **Add**. + >An account can be provisioned with a single Hosted GitOps Runtime. If you have already provisioned a Hosted GitOps Runtime for your account, the Hosted GitOps Runtime option is disabled. * Continue from _step 3_. {% include @@ -63,14 +73,14 @@ image.html lightbox="true" file="/images/runtime/hosted-install-later.png" url="/images/runtime/hosted-install-later.png" -alt="Install hosted runtime" -caption="Install hosted runtime" +alt="Install Hosted GitOps Runtime" +caption="Install Hosted GitOps Runtime" max-width="40%" %} {:start="3"} -1. When complete, to view the components for the hosted runtime, click **View Runtime**. +1. When complete, to view the components for the Hosted GitOps Runtime, click **View Runtime**. You are directed to the Runtime Components tab. 
{% include
@@ -78,14 +88,14 @@ image.html
 lightbox="true"
 file="/images/runtime/hosted-runtime-components.png"
 url="/images/runtime/hosted-runtime-components.png"
-alt="Runtime components for hosted runtime"
-caption="Runtime components for hosted runtime"
+alt="Runtime components for Hosted GitOps Runtime"
+caption="Runtime components for Hosted GitOps Runtime"
 max-width="70%"
 %}
 
 > The Git Sources and the Managed Clusters are empty as they will be set up in the next steps.
 
-If you navigate to **Runtimes > List View**, you can identify the hosted runtime through the Type column (Hosted ), the Cluster/Namespace column (Codefresh), and the Module column (CD Ops).
+If you navigate to **Runtimes > List View**, you can identify the Hosted GitOps Runtime through the Type column (Hosted), the Cluster/Namespace column (Codefresh), and the Module column (CD Ops).
 
 {% include
 image.html
@@ -97,8 +107,8 @@ caption="Hosted runtimes in List view"
 max-width="70%"
 %}
 
-#### Troubleshoot failed hosted runtime installation
-Your hosted runtime may fail to install with an error as in the image below. We are closely moinitoring the hosted runtime installation process and activley working to prevent and iron out all installation errors. Follow the instructions to uninstall and reinstall the hosted runtime.
+### Troubleshoot failed Hosted GitOps Runtime installation
+Your Hosted GitOps Runtime may fail to install with an error as in the image below. We are closely monitoring the Hosted GitOps Runtime installation process and actively working to prevent and iron out all installation errors. Follow the instructions to uninstall and reinstall the Hosted GitOps Runtime.
 
 {% include
 image.html
@@ -117,16 +127,16 @@ max-width="70%"
 To compare with the latest version from Codefresh, [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"}.
 * [Download the CLI]({{site.baseurl}}/docs/clients/csdp-cli/).
-1. Uninstall the failed hosted runtime:
+1. Uninstall the failed Hosted GitOps Runtime:
 `cf runtime uninstall codefresh-hosted --force`
 where:
- `hosted-codefresh` is the name of your hosted runtime, automatically assigned by Codefresh.
+ `hosted-codefresh` is the name of your Hosted GitOps Runtime, automatically assigned by Codefresh.
 1. In the Codefresh UI, return to Codefresh [Home](https://g.codefresh.io/2.0/?time=LAST_7_DAYS){:target="\_blank"}.
-1. Refresh the page and start with _1. Provision hosted runtime_ above.
+1. Refresh the page and start with _Step 1: Install Hosted GitOps Runtime_ above.
 
-### 2. Connect Git provider
-Connect your hosted runtime to a Git provider for Codefresh to create the required Git repos. First authorize access to your Git provider through an OAuth token, and then select the Git organizations or accounts in which to create the required Git repos.
+### Step 2: Connect Git provider
+Connect your Hosted GitOps Runtime to a Git provider for Codefresh to create the required Git repos. First authorize access to your Git provider through an OAuth token, and then select the Git organizations or accounts in which to create the required Git repos.
 
 >Only authorized organizations are displayed in the list. To authorize organizations for the Codefresh application in GitHub, see [Authorize organizations/projects]({{site.baseurl}}/docs/administration/hosted-authorize-orgs/).
@@ -145,12 +155,12 @@ max-width="80%" Once you authorize access, Codefresh creates two Git repositories, one to store the runtime configuration settings, and the other to store the runtime's application settings: * Shared runtime configuration repo - The shared runtime configuration repo is a centralized Git repository that stores configuration settings for the hosted runtime. Additional runtimes provisioned for the account can point to this repo to retrieve and reuse the configuration. + The shared runtime configuration repo is a centralized Git repository that stores configuration settings for the Hosted GitOps Runtime. Additional runtimes provisioned for the account can point to this repo to retrieve and reuse the configuration. Read about [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/). * Git Source application repo - Codefresh creates a Git Source application repo for every hosted runtime. + Codefresh creates a Git Source application repo for every Hosted GitOps Runtime. Read about [Git sources]({{site.baseurl}}/docs/runtime/git-sources/). @@ -224,15 +234,15 @@ image.html lightbox="true" file="/images/runtime/hosted-git-source-in-ui.png" url="/images/runtime/hosted-git-source-in-ui.png" -alt="Git Source tab for hosted runtime" -caption="Git Source tab for hosted runtime" +alt="Git Source tab for Hosted GitOps Runtime" +caption="Git Source tab for Hosted GitOps Runtime" max-width="80%" %} ### 3. Connect a Kubernetes cluster -Connect a destination cluster to the hosted runtime and register it as a managed cluster. Deploy applications and configuration to the cluster. +Connect a destination cluster to the Hosted GitOps Runtime and register it as a managed cluster. Deploy applications and configuration to the cluster. For managed cluster information and installing Argo Rollouts, see [Add and manage external clusters]({{site.baseurl}}/docs/runtime/managed-cluster/). @@ -241,8 +251,8 @@ image.html lightbox="true" file="/images/runtime/hosted-connect-cluster-step.png" url="/images/runtime/hosted-connect-cluster-step.png" -alt="Step 3: Connect a K8s cluster for hosted runtime" -caption="Step 3: Connect a K8s cluster for hosted runtime" +alt="Step 3: Connect a K8s cluster for Hosted GitOps Runtime" +caption="Step 3: Connect a K8s cluster for Hosted GitOps Runtime" max-width="70%" %} @@ -273,8 +283,8 @@ max-width="70%" lightbox="true" file="/images/runtime/hosted-new-cluster-topology.png" url="/images/runtime/hosted-new-cluster-topology.png" - alt="New K8s cluster in hosted runtime" - caption="New K8s cluster in hosted runtime" + alt="New K8s cluster in Hosted GitOps Runtime" + caption="New K8s cluster in Hosted GitOps Runtime" max-width="80%" %} @@ -287,7 +297,7 @@ If you could not connect a cluster, you may not have the latest version of the C To compare with the latest version from Codefresh, [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"}. * [Download the CLI]({{site.baseurl}}/docs/clients/csdp-cli/). -You have completed setting up your hosted runtime. You are ready to create applications, and connect third-party CI tools for image enrichment. +You have completed setting up your Hosted GitOps Runtime. You are ready to create applications, and connect third-party CI tools for image enrichment. ### (Optional) Create application Optional. Create an application in Codefresh, deploy it to the cluster, and track deployment and performance in the Applications dashboard. @@ -305,8 +315,9 @@ Optional. 
Integrate Codefresh with the third-party tools you use for CI to enrich your images.
 [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/)
 
 ### Related articles
-[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/)
-[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/)
+[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/)
+[Add Git Sources to runtimes]({{site.baseurl}}/docs/installation/git-sources/)
+[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration)
 [Home dashboard]({{site.baseurl}}/docs/reporting/home-dashboard/)
 [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/)
diff --git a/_docs/installation/hybrid-gitops.md b/_docs/installation/hybrid-gitops.md
new file mode 100644
index 00000000..889c8d29
--- /dev/null
+++ b/_docs/installation/hybrid-gitops.md
@@ -0,0 +1,1282 @@
+---
+title: "Hybrid GitOps Runtime installation"
+description: "Provision Hybrid GitOps Runtimes"
+group: installation
+toc: true
+---
+
+Provision one or more Hybrid GitOps Runtimes in your Codefresh account.
+Start by reviewing the [system requirements](#minimum-system-requirements) for Hybrid GitOps. If you are installing with ingress controllers, you must configure them as required _before_ starting the installation.
+
+> To provision a Hosted GitOps Runtime, see [Provision a hosted runtime]({{site.baseurl}}/docs/installation/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/installation/hosted-runtime/).
+
+**Git providers and Hybrid Runtimes**
+Your Codefresh account is always linked to a specific Git provider. This is the Git provider you select when installing the first GitOps Runtime, either Hybrid or Hosted, in your Codefresh account. All the Hybrid Runtimes you install in the same account use the same Git provider.
+If Bitbucket Server is your Git provider, you must also select the specific server instance to associate with the runtime.
+
+>To change the Git provider for your Codefresh account after installation, contact Codefresh support.
+
+
+**Hybrid Runtimes**
+The Hybrid Runtime comprises Argo CD components and Codefresh-specific components. The Argo CD components are derived from a fork of the Argo ecosystem, and do not correspond to the open-source versions available.
+
+There are two parts to installing a Hybrid GitOps Runtime:
+
+1. [Installing the Codefresh CLI](#gitops-cli-installation)
+2. [Installing the Hybrid GitOps Runtime](#install-hybrid-gitops-runtime), either through the CLI wizard or via silent installation using the installation flags.
+  The Hybrid GitOps Runtime is installed in a specific namespace on your cluster. You can install more Runtimes on different clusters in your deployment.
+  Every Hybrid GitOps Runtime installation makes commits to three Git repos:
+  * Runtime install repo: The installation repo that manages the Hybrid Runtime itself with Argo CD. If the repo URL does not exist, it is automatically created during installation.
+  * Git Source repo: Created automatically during Runtime installation. The repo where you store manifests for pipelines and applications. See [Git Sources]({{site.baseurl}}/docs/runtime/git-sources).
+  * Shared configuration repo: Created for the first GitOps Runtime installed in a user account.
The repo stores configuration manifests for account-level resources and is shared with other GitOps Runtimes in the same account. See [Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration). + + + +{::nomarkdown} +
        +{:/} + +## Minimum system requirements + +{: .table .table-bordered .table-hover} +| Item | Requirement | +| -------------- | -------------- | +|Kubernetes cluster | Server version 1.18 and higher, without Argo Project components. {::nomarkdown}
        Tip: To check the server version, run:
        kubectl version --short.{:/}| +| Ingress controller| Configured on Kubernetes cluster and exposed from the cluster. {::nomarkdown}
        Supported and tested ingress controllers include:
        • Ambassador
        • {:/}(see [Ambassador ingress configuration](#ambassador-ingress-configuration)){::nomarkdown}
        • AWS ALB (Application Load Balancer)
        • {:/} (see [AWS ALB ingress configuration](#aws-alb-ingress-configuration)){::nomarkdown}
        • Istio
        • {:/} (see [Istio ingress configuration](#istio-ingress-configuration)){::nomarkdown}
        • NGINX Enterprise (nginx.org/ingress-controller)
        • {:/} (see [NGINX Enterprise ingress configuration](#nginx-enterprise-ingress-configuration)){::nomarkdown}
        • NGINX Community (k8s.io/ingress-nginx)
        • {:/} (see [NGINX Community ingress configuration](#nginx-community-version-ingress-configuration)){::nomarkdown}
        • Traefik
        • {:/}(see [Traefik ingress configuration](#traefik-ingress-configuration))| +|Node requirements| {::nomarkdown}
          • Memory: 5000 MB
          • CPU: 2
          {:/}| +|Cluster permissions | Cluster admin permissions | +|Git providers |{::nomarkdown}
          • GitHub
          • GitHub Enterprise
          • GitLab Cloud
          • GitLab Server
          • Bitbucket Cloud
          • Bitbucket Server
          {:/}| +|Git access tokens | {::nomarkdown}Git runtime token:
          • Valid expiration date
          • Scopes:
            • GitHub and GitHub Enterprise: repo, admin-repo.hook
            • GitLab Cloud and GitLab Server: api, read_repository
            • Bitbucket Cloud and Server: Permissions: Read, Workspace membership: Read, Webhooks: Read and write, Repositories: Write, Admin
            {:/}| + +## Ingress controller configuration + +### Ambassador ingress configuration +For detailed configuration information, see the [Ambassador ingress controller documentation](https://www.getambassador.io/docs/edge-stack/latest/topics/running/ingress-controller){:target="\_blank"}. + +This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. +* Valid external IP address +* Valid TLS certificate +* TCP support + +{::nomarkdown} +
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + {::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +

            +{:/}
+
+### AWS ALB ingress configuration
+
+For detailed configuration information, see the [ALB AWS ingress controller documentation](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4){:target="\_blank"}.
+
+This table lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address | _Before_ installing hybrid runtime |
+|Valid TLS certificate | |
+|TCP support| |
+|Controller configuration | |
+|Alias DNS record in route53 to load balancer | _After_ installing hybrid runtime |
+|(Optional) Git integration registration | |
+
+{::nomarkdown}
+
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
            +{:/} + +#### Controller configuration +In the ingress resource file, verify that `spec.controller` is configured as `ingress.k8s.aws/alb`. + +```yaml +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: alb +spec: + controller: ingress.k8s.aws/alb +``` + +{::nomarkdown} +
            +{:/} + +#### Create an alias to load balancer in route53 + +> The alias must be configured _after_ installing the hybrid runtime. + +1. Make sure a DNS record is available in the correct hosted zone. +1. _After_ hybrid runtime installation, in Amazon Route 53, create an alias to route traffic to the load balancer that is automatically created during the installation: + * **Record name**: Enter the same record name used in the installation. + * Toggle **Alias** to **ON**. + * From the **Route traffic to** list, select **Alias to Application and Classic Load Balancer**. + * From the list of Regions, select the region. For example, **US East**. + * From the list of load balancers, select the load balancer that was created during installation. + +For more information, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html){:target="\_blank"}. + +{% include image.html + lightbox="true" + file="/images/runtime/post-install-alb-ingress.png" + url="/images/runtime/post-install-alb-ingress.png" + alt="Route 53 record settings for AWS ALB" + caption="Route 53 record settings for AWS ALB" + max-width="60%" +%} + +{::nomarkdown} +
+{:/}
+
+#### (Optional) Git integration registration
+If the installation failed, as can happen if the DNS record was not created within the timeframe, manually create and register Git integrations using these commands:
+  `cf integration git add default --runtime <runtime-name> --api-url <git-api-url>`
+  `cf integration git register default --runtime <runtime-name> --token <git-runtime-token>`
+
+{::nomarkdown}

+{:/}
+
+### Istio ingress configuration
+For detailed configuration information, see [Istio ingress controller documentation](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress){:target="\_blank"}.
+
+The table below lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address |_Before_ installing hybrid runtime |
+|Valid TLS certificate| |
+|TCP support | |
+|Cluster routing service | _After_ installing hybrid runtime |
+
+{::nomarkdown}
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
            +{:/} + + + +#### Cluster routing service +> The cluster routing service must be configured _after_ installing the hybrid runtime. + +Based on the runtime version, you need to configure a single or multiple `VirtualService` resources for the `app-proxy`, `webhook`, and `workflow` services. + +##### Runtime version 0.0.543 or higher +Configure a single `VirtualService` resource to route traffic to the `app-proxy`, `webhook`, and `workflow` services, as in the example below. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + namespace: pov-codefresh-istio-runtime # replace with your runtime name + name: internal-router +spec: + hosts: + - pov-codefresh-istio-runtime.sales-dev.codefresh.io # replace with your host name + gateways: + - istio-system/internal-router # replace with your gateway name + http: + - match: + - uri: + prefix: /webhooks + route: + - destination: + host: internal-router + port: + number: 80 + - match: + - uri: + prefix: /app-proxy + route: + - destination: + host: internal-router + port: + number: 80 + - match: + - uri: + prefix: /workflows + route: + - destination: + host: internal-router + port: + number: 80 +``` + +##### Runtime version 0.0.542 or lower + +Configure two different `VirtualService` resources, one to route traffic to the `app-proxy`, and the second to route traffic to the `webhook` services, as in the examples below. + +{::nomarkdown} +
+{:/}
+
+**`VirtualService` example for `app-proxy`:**
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  namespace: test-runtime3 # replace with your runtime name
+  name: cap-app-proxy
+spec:
+  hosts:
+  - my.support.cf-cd.com # replace with your host name
+  gateways:
+  - my-gateway # replace with your gateway name
+  http:
+  - match:
+    - uri:
+        prefix: /app-proxy
+    route:
+    - destination:
+        host: cap-app-proxy
+        port:
+          number: 3017
+```
+
+**`VirtualService` example for `webhook`:**
+
+> Configure a `uri.prefix` and `destination.host` for each event-source if you have more than one.
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  namespace: test-runtime3 # replace with your runtime name
+  name: csdp-default-git-source
+spec:
+  hosts:
+  - my.support.cf-cd.com # replace with your host name
+  gateways:
+  - my-gateway # replace with your gateway name
+  http:
+  - match:
+    - uri:
+        prefix: /webhooks/test-runtime3/push-github # replace `test-runtime3` with your runtime name, and `push-github` with the name of your event source
+    route:
+    - destination:
+        host: push-github-eventsource-svc # replace `push-github` with the name of your event source
+        port:
+          number: 80
+  - match:
+    - uri:
+        prefix: /webhooks/test-runtime3/cypress-docker-images-push # replace `test-runtime3` with your runtime name, and `cypress-docker-images-push` with the name of your event source
+    route:
+    - destination:
+        host: cypress-docker-images-push-eventsource-svc # replace `cypress-docker-images-push` with the name of your event source
+        port:
+          number: 80
+```
+
+{::nomarkdown}

+{:/}
+
+### NGINX Enterprise ingress configuration
+
+For detailed configuration information, see [NGINX ingress controller documentation](https://docs.nginx.com/nginx-ingress-controller){:target="\_blank"}.
+
+The table below lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address |_Before_ installing hybrid runtime |
+|Valid TLS certificate | |
+|TCP support| |
+|NGINX Ingress: Enable report status to cluster | |
+|NGINX Ingress Operator: Enable report status to cluster| |
+|Patch certificate secret |_After_ installing hybrid runtime |
+
+{::nomarkdown}
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
+{:/}
+
+#### NGINX Ingress: Enable report status to cluster
+
+If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing”, resulting in a timeout error during installation.
+
+* Pass `--report-ingress-status` to the ingress controller `deployment`.
+
+```yaml
+spec:
+  containers:
+    - args:
+      - --report-ingress-status
+```
+
+{::nomarkdown}
+{:/}
+
+#### NGINX Ingress Operator: Enable report status to cluster
+
+If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing”, resulting in a timeout error during installation.
+
+1. Add this to the `Nginxingresscontrollers` resource file:
+
+   ```yaml
+   ...
+   spec:
+     reportIngressStatus:
+       enable: true
+   ...
+   ```
+
+1. Make sure you have a certificate secret in the same namespace as the runtime. Copy an existing secret if you don't have one.
+You will need to add this to the `ingress-master` when you have completed runtime installation.
+
+{::nomarkdown}
+{:/}
+
+#### Patch certificate secret
+> The certificate secret must be configured _after_ installing the hybrid runtime.
+
+Patch the certificate secret in `spec.tls` of the `ingress-master` resource.
+The secret must be in the same namespace as the runtime.
+
+1. Go to the runtime namespace with the NGINX ingress controller.
+1. In `ingress-master`, add to `spec.tls`:
+
+   ```yaml
+   tls:
+    - hosts:
+      - <host>
+      secretName: <secret-name>
+   ```
+
+{::nomarkdown}
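+{:/}
+
+If you prefer to apply the change without opening an editor, the same update can be made with `kubectl patch`. This is a sketch only; the namespace, ingress name, host, and secret name below are placeholders:
+
+```shell
+# Replace the namespace, ingress name, host, and secret name with your own values
+kubectl patch ingress my-ingress-master -n my-runtime-namespace --type merge \
+  -p '{"spec":{"tls":[{"hosts":["my-runtime.example.com"],"secretName":"my-tls-secret"}]}}'
+```
+
+{::nomarkdown}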

+{:/}
+
+### NGINX Community version ingress configuration
+
+Codefresh has been tested with and supports implementations of the major providers. For your convenience, we have provided configuration instructions, both for supported and untested providers, in [Provider-specific configuration](#provider-specific-configuration).
+
+
+This section lists the configuration requirements for Codefresh, all of which must be completed _before_ installing the hybrid runtime:
+* Valid external IP address
+* Valid TLS certificate
+* TCP support
+
+{::nomarkdown}
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services, and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +Here's an example of TCP configuration for NGINX Community on AWS. +Verify that the `ingress-nginx-controller` service manifest has either of the following annotations: + +`service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"` +OR +`service.beta.kubernetes.io/aws-load-balancer-type: nlb` + +{::nomarkdown} +
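+{:/}
+
+For reference, this is a trimmed-down sketch of how one of the annotations above might look on the controller Service; the values are illustrative, and your actual `ingress-nginx-controller` Service will contain additional fields:
+
+```yaml
+# Illustrative excerpt only
+apiVersion: v1
+kind: Service
+metadata:
+  name: ingress-nginx-controller
+  namespace: ingress-nginx
+  annotations:
+    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
+spec:
+  type: LoadBalancer
+```
+
+{::nomarkdown}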
            +{:/} + +#### Provider-specific configuration + +> The instructions are valid for `k8s.io/ingress-nginx`, the community version of NGINX. + +
+##### AWS
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/aws/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for AWS.
+
+##### Azure (AKS)
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for AKS.
+
+##### Bare Metal Clusters
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+Bare-metal clusters often have additional considerations. See Bare-metal ingress-nginx considerations.
+
+##### Digital Ocean
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/do/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for Digital Ocean.
+
+##### Docker Desktop
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for Docker Desktop.
+> Note: By default, Docker Desktop services will provision with `localhost` as their external address. Triggers in delivery pipelines cannot reach this instance unless they originate from the same machine where Docker Desktop is being used.
+
+##### Exoscale
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/exoscale/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for Exoscale.
+
+##### Google (GKE)
+
+**Add firewall rules**
+
+GKE by default limits outbound requests from nodes. For the runtime to communicate with the control-plane in Codefresh, add a firewall-specific rule.
+
+1. Find your cluster's network:
+   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(network)"`
+1. Get the Cluster IPV4 CIDR:
+   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(clusterIpv4Cidr)"`
+1. Replace the `[CLUSTER_NAME]`, `[NETWORK]`, and `[CLUSTER_IPV4_CIDR]`, with the relevant values:
+
+   ```shell
+   gcloud compute firewall-rules create "[CLUSTER_NAME]-to-all-vms-on-network" \
+     --network="[NETWORK]" \
+     --source-ranges="[CLUSTER_IPV4_CIDR]" \
+     --allow=tcp,udp,icmp,esp,ah,sctp
+   ```
+
+**Use ingress-nginx**
+
+1. Create a `cluster-admin` role binding:
+
+   ```shell
+   kubectl create clusterrolebinding cluster-admin-binding \
+     --clusterrole cluster-admin \
+     --user $(gcloud config get-value account)
+   ```
+
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+We recommend reviewing the provider-specific documentation for GKE.
+
+##### MicroK8s
+1. Install using Microk8s addon system:
+   `microk8s enable ingress`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+MicroK8s has not been tested with Codefresh, and may require additional configuration. For details, see Ingress addon documentation.
+
+##### MiniKube
+1. Install using MiniKube addon system:
+   `minikube addons enable ingress`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+MiniKube has not been tested with Codefresh, and may require additional configuration. For details, see Ingress addon documentation.
+
+##### Oracle Cloud Infrastructure
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for Oracle Cloud.
+
+##### Scaleway
+1. Apply:
+   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/scw/deploy.yaml`
+1. Verify a valid external address exists:
+   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
+
+For additional configuration options, see ingress-nginx documentation for Scaleway.
+
+{::nomarkdown}

+{:/}
+
+### Traefik ingress configuration
+For detailed configuration information, see [Traefik ingress controller documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress){:target="\_blank"}.
+
+The table below lists the specific configuration requirements for Codefresh.
+
+{: .table .table-bordered .table-hover}
+| What to configure | When to configure |
+| -------------- | -------------- |
+|Valid external IP address | _Before_ installing hybrid runtime |
+|Valid TLS certificate | |
+|TCP support | |
+|Enable report status to cluster| |
+
+{::nomarkdown}
            +{:/} + +#### Valid external IP address +Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. + +{::nomarkdown} +
            +{:/} + +#### Valid TLS certificate +For secure runtime installation, the ingress controller must have a valid TLS certificate. +> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. + +{::nomarkdown} +
            +{:/} + +#### TCP support +Configure the ingress controller to handle TCP requests. + +{::nomarkdown} +
+{:/}
+
+#### Enable report status to cluster
+By default, the Traefik ingress controller is not configured to report its status to the cluster. If not configured, Argo’s health check reports the health status as “progressing”, resulting in a timeout error during installation.
+
+To enable reporting its status, add `publishedService` to `providers.kubernetesIngress.ingressEndpoint`.
+
+The value must be in the format `"<namespace>/<service-name>"`, where:
+  `<service-name>` is the Traefik service from which to copy the status
+
+```yaml
+...
+providers:
+  kubernetesIngress:
+    ingressEndpoint:
+      publishedService: "<namespace>/<service-name>" # Example, "codefresh/traefik-default"
+...
+```
+
+{::nomarkdown}
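+{:/}
+
+If you run Traefik with command-line arguments rather than a configuration file, the equivalent static-configuration flag is shown below as a sketch; the namespace/service value reuses the example from above and should be replaced with your own Traefik service:
+
+```shell
+# Equivalent CLI flag; replace codefresh/traefik-default with your <namespace>/<service-name>
+--providers.kubernetesingress.ingressendpoint.publishedservice=codefresh/traefik-default
+```
+
+{::nomarkdown}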
+{:/}
+
+## GitOps CLI installation
+
+### GitOps CLI installation modes
+The table lists the modes available to install the Codefresh CLI.
+
+{: .table .table-bordered .table-hover}
+| Install mode | OS | Commands |
+| -------------- | ----------| ----------|
+| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz \| tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`|
+| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz \| tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` |
+| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz \| tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` |
+| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz \| tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`|
+| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`|
+
+### Install the GitOps CLI
+Install the Codefresh CLI using the option that best suits you: `curl`, `brew`, or standard download.
+If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation.
+
+1. Do one of the following:
+  * For first-time installation, go to the Welcome page, select **+ Install Runtime**.
+  * If you have provisioned a GitOps Runtime, in the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtime**.
+1. Install the Codefresh CLI:
+  * Select one of the installation modes.
+  * Generate the API key.
+  * Create the authentication context:
+    `cf config create-context codefresh --api-key <API_KEY>`
+
+
+  {% include
+   image.html
+   lightbox="true"
+   file="/images/getting-started/quick-start/quick-start-download-cli.png"
+   url="/images/getting-started/quick-start/quick-start-download-cli.png"
+   alt="Download CLI to install runtime"
+   caption="Download CLI to install runtime"
+   max-width="30%"
+  %}
+
+
+{::nomarkdown}

+{:/}
+
+## Install Hybrid GitOps Runtime
+
+**Before you begin**
+* Make sure you meet the [minimum requirements]({{site.baseurl}}/docs/runtime/requirements/#minimum-requirements) for installation
+* Make sure you have a [Runtime token with the required scopes from your Git provider]({{site.baseurl}}/docs/reference/git-tokens)
+* [Download or upgrade to the latest version of the CLI]({{site.baseurl}}/docs/installation/hybrid-gitops/#hybrid-gitops-upgrade-gitops-cli)
+* Review [Hybrid GitOps Runtime installation flags](#hybrid-gitops-runtime-installation-flags)
+* For ingress-based runtimes, make sure your ingress controller is configured correctly:
+  * [Ambassador ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#ambassador-ingress-configuration)
+  * [AWS ALB ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#alb-aws-ingress-configuration)
+  * [Istio ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#istio-ingress-configuration)
+  * [NGINX Enterprise ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-enterprise-ingress-configuration)
+  * [NGINX Community ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-community-version-ingress-configuration)
+  * [Traefik ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#traefik-ingress-configuration)
+
+
+{::nomarkdown}
+{:/}
+
+**How to**
+
+1. Do one of the following:
+  * If this is your first Hybrid Runtime installation, in the Welcome page, select **+ Install Runtime**.
+  * If you have provisioned a Hybrid Runtime, to provision additional runtimes, in the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}.
+1. Click **+ Add Runtimes**, and then select **Hybrid Runtimes**.
+1. Do one of the following:
+  * CLI wizard: Run `cf runtime install`, and follow the prompts to enter the required values.
+  * Silent install: Pass the required flags in the install command (see the example after these steps):
+    `cf runtime install <runtime-name> --repo <repo-url> --git-token <git-runtime-token> --silent`
+    For the list of flags, see [Hybrid GitOps Runtime installation flags](#hybrid-gitops-runtime-installation-flags).
+1. If relevant, complete the configuration for these ingress controllers:
+  * [ALB AWS: Alias DNS record in route53 to load balancer]({{site.baseurl}}/docs/runtime/requirements/#alias-dns-record-in-route53-to-load-balancer)
+  * [Istio: Configure cluster routing service]({{site.baseurl}}/docs/runtime/requirements/#cluster-routing-service)
+  * [NGINX Enterprise ingress controller: Patch certificate secret]({{site.baseurl}}/docs/runtime/requirements/#patch-certificate-secret)
+1. If you bypassed installing ingress resources with the `--skip-ingress` flag for ingress controllers not in the supported list, create and register Git integrations using these commands:
+  `cf integration git add default --runtime <runtime-name> --api-url <git-api-url>`
+  `cf integration git register default --runtime <runtime-name> --token <git-runtime-token>`
+
+
+{::nomarkdown}
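+{:/}
+
+For reference, a complete silent-install command might look like the sketch below; the runtime name, repository URL, and token are placeholders, not values from your account:
+
+```shell
+# All values below are placeholders
+cf runtime install my-runtime \
+  --repo https://github.com/my-org/my-runtime-repo.git \
+  --git-token $GIT_RUNTIME_TOKEN \
+  --silent
+```
+
+{::nomarkdown}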
            +{:/} + + + +## Hybrid GitOps Runtime installation flags +This section describes the required and optional flags to install a Hybrid GitOps Runtime. +For documentation purposes, the flags are grouped into: +* Runtime flags, relating to Runtime, cluster, and namespace requirements +* Ingress-less flags, for tunnel-based installation +* Ingress-controller flags, for ingress-based installation +* Git provider flags +* Codefresh resource flags + +{::nomarkdown} +
+{:/}
+
+### Runtime flags
+
+**Runtime name**
+Required.
+The Runtime name must start with a lower-case character, and can include up to 62 lower-case characters and numbers.
+* CLI wizard: Add when prompted.
+* Silent install: Add the `--runtime` flag and define the name.
+
+**Namespace resource labels**
+Optional.
+The label of the namespace resource to which you are installing the Hybrid Runtime. Labels are required to identify the networks that need access during installation, as is the case when using service meshes such as Istio.
+
+* CLI wizard and Silent install: Add the `--namespace-labels` flag, and define the labels in `key=value` format. Separate multiple labels with `commas`.
+
+**Kube context**
+Required.
+The cluster defined as the default for `kubectl`. If you have more than one Kube context, the current context is selected by default.
+
+* CLI wizard: Select the Kube context from the list displayed.
+* Silent install: Explicitly specify the Kube context with the `--context` flag.
+
+**Access mode**
+The access mode for the runtime, which can be one of the following:
+* [Tunnel-based]({{site.baseurl}}/docs/installation/runtime-architecture/#tunnel-based-hybrid-gitops-runtime-architecture), for runtimes without ingress controllers. This is the default.
+* [Ingress-based]({{site.baseurl}}/docs/getting-started/architecture/#ingress-based-hybrid-gitops-runtime-architecture), for runtimes with ingress controllers.
+
+
+* CLI wizard: Select the access mode from the list displayed.
+* Silent install:
+  * For tunnel-based, see [Tunnel-based runtime flags](#tunnel-based-runtime-flags)
+  * For ingress-based, add the [Ingress controller flags](#ingress-controller-flags)
+
+  >If you don't specify any flags, tunnel-based access is automatically selected.
+
+**Shared configuration repository**
+The Git repository per Runtime account with shared configuration manifests.
+* CLI wizard and Silent install: Add the `--shared-config-repo` flag and define the path to the shared repo.
+
+{::nomarkdown}
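+{:/}
+
+Putting the Runtime flags together, a silent install that sets the Kube context, namespace labels, and shared configuration repository might look like this sketch; all values are placeholders:
+
+```shell
+# All values below are placeholders
+cf runtime install my-runtime \
+  --context my-cluster-context \
+  --namespace-labels istio-injection=enabled,team=platform \
+  --shared-config-repo https://github.com/my-org/codefresh-shared-config.git
+```
+
+{::nomarkdown}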
            +{:/} + +### Tunnel-based runtime flags +These flags are required to install tunnel-based Hybrid Runtimes, without an ingress controller. + +**IP allowlist** + +Optional. + +The allowed list of IPs from which to forward requests to the internal customer cluster for ingress-less runtime installations. The allowlist can include IPv4 and IPv6 addresses, with/without subnet and subnet masks. Multiple IPs must be separated by commas. + +When omitted, all incoming requests are authenticated regardless of the IPs from which they originated. + +* CLI wizard and Silent install: Add the `--ips-allow-list` flag, followed by the IP address, or list of comma-separated IPs to define more than one. For example, `--ips-allow-list 77.126.94.70/16,192.168.0.0` + +{::nomarkdown} +
+{:/}
+
+### Ingress controller flags
+
+
+**Skip ingress**
+Required, if you are using an unsupported ingress controller.
+For unsupported ingress controllers, bypass installing ingress resources with the `--skip-ingress` flag.
+In this case, after completing the installation, manually configure the cluster's routing service, and create and register Git integrations. See the last step in [Install the Hybrid GitOps Runtime](#install-hybrid-gitops-runtime).
+
+**Ingress class**
+Required.
+
+* CLI wizard: Select the ingress class for Runtime installation from the list displayed.
+* Silent install: Explicitly specify the ingress class through the `--ingress-class` flag. Otherwise, Runtime installation fails.
+
+**Ingress host**
+Required.
+The IP address or host name of the ingress controller component.
+
+* CLI wizard: Automatically selects and displays the host, either from the cluster or the ingress controller associated with the **Ingress class**.
+* Silent install: Add the `--ingress-host` flag. If a value is not provided, takes the host from the ingress controller associated with the **Ingress class**.
+  > Important: For AWS ALB, the ingress host is created post-installation. However, when prompted, add the domain name you will create in `Route 53` as the ingress host.
+
+**Insecure ingress hosts**
+TLS certificates for the ingress host:
+If the ingress host does not have a valid TLS certificate, you can continue with the installation in insecure mode, which disables certificate validation.
+
+* CLI wizard: Automatically detects and prompts you to confirm continuing the installation in insecure mode.
+* Silent install: To continue with the installation in insecure mode, add the `--insecure-ingress-host` flag.
+
+**Internal ingress host**
+Optional.
+Enforce separation between internal (app-proxy) and external (webhook) communication by adding an internal ingress host for the app-proxy service in the internal network.
+For both CLI wizard and Silent install:
+
+* For new Runtime installations, add the `--internal-ingress-host` flag pointing to the ingress host for `app-proxy`.
+* For existing installations, commit changes to the installation repository by modifying the `app-proxy ingress` and the `<runtime-name>.yaml` file.
+  See [(Optional) Internal ingress host configuration for existing Hybrid Runtimes](#optional-internal-ingress-host-configuration-for-existing-hybrid-runtimes).
+
+{::nomarkdown}
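+{:/}
+
+As a sketch of an ingress-based installation, the ingress-related flags described above might be combined as follows; the class and hosts are placeholders:
+
+```shell
+# All values below are placeholders
+cf runtime install my-runtime \
+  --ingress-class nginx \
+  --ingress-host https://my-runtime.example.com \
+  --internal-ingress-host https://my-runtime-internal.example.com
+```
+
+{::nomarkdown}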
            +{:/} + + + +### Git provider and repo flags +The Git provider defined for the Runtime. + +>Because Codefresh creates a [shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) for the Runtimes in your account, the Git provider defined for the first Runtime you install in your account is used for all the other Runtimes in the same account. + +You can define any of the following Git providers: +* GitHub: + * [GitHub](#github) (the default Git provider) + * [GitHub Enterprise](#github-enterprise) +* GitLab: + * [GitLab Cloud](#gitlab-cloud) + * [GitLab Server](#gitlab-server) +* Bitbucket: + * [Bitbucket Cloud](#bitbucket-cloud) + * [Bitbucket Server](#bitbucket-server) + +{::nomarkdown} +
+{:/}
+
+
+
+#### GitHub
+GitHub is the default Git provider for Hybrid Runtimes. Being the default provider, for both the CLI wizard and Silent install, you need to provide only the repository URL and the Git runtime token.
+
+> For the required scopes, see [GitHub and GitHub Enterprise Runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes).
+
+`--repo <repo_url> --git-token <git-runtime-token>`
+
+where:
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix. Copy the clone URL from your GitHub website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}).
+  If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during the installation.
+
+  Repo URL format:
+  `https://github.com/<owner>/<reponame>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<owner>/<reponame>` is your username or organization name, followed by the name of the repo, identical to the HTTPS clone URL. For example, `https://github.com/nr-codefresh/codefresh.io.git`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Example:
+  `https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod`
+* `--git-token <git-runtime-token>` (required), is the Git token authenticating access to the Runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)).
+
+{::nomarkdown}
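+{:/}
+
+Putting the GitHub flags together with the example repository URL from above, an install command might look like this sketch; the runtime name and token are placeholders:
+
+```shell
+# The runtime name and token value are placeholders
+cf runtime install my-runtime \
+  --repo "https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod" \
+  --git-token <git-runtime-token>
+```
+
+{::nomarkdown}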
+{:/}
+
+#### GitHub Enterprise
+
+> For the required scopes, see [GitHub and GitHub Enterprise runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes).
+
+
+`--provider github --repo <repo_url> --git-token <git-runtime-token>`
+
+where:
+* `--provider github` (required), defines GitHub Enterprise as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitHub Enterprise website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}).
+  If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during the installation.
+  Repo URL format:
+
+  `https://ghe-trial.devops.cf-cd.com/<owner>/<reponame>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<owner>/<reponame>` is your username or organization name, followed by the name of the repo. For example, `codefresh-io/codefresh.io.git`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Example:
+  `https://ghe-trial.devops.cf-cd.com/codefresh-io/codefresh.io.git/runtimes/defs?ref=codefresh-prod`
+* `--git-token <git-runtime-token>` (required), is the Git token authenticating access to the Runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)).
+
+
+{::nomarkdown}
+{:/}
+
+#### GitLab Cloud
+> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes).
+
+
+`--provider gitlab --repo <repo_url> --git-token <git-runtime-token>`
+
+where:
+* `--provider gitlab` (required), defines GitLab Cloud as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git project for the Runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitLab website.
+  If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during the installation.
+
+  > Important: You must create the group with access to the project prior to the installation.
+
+  Repo URL format:
+
+  `https://gitlab.com/<owner_or_group>/<project>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<owner_or_group>` is either your username, or if your project is within a group, the front-slash separated path to the project. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy)
+  * `<project>` is the name of the project. For example, `codefresh`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Examples:
+  `https://gitlab.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner)
+
+  `https://gitlab.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy)
+
+* `--git-token <git-runtime-token>` (required), is the Git token authenticating access to the Runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)).
+
+
+{::nomarkdown}
+{:/}
+
+
+#### GitLab Server
+
+> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes).
+
+`--provider gitlab --repo <repo_url> --git-token <git-runtime-token>`
+
+where:
+* `--provider gitlab` (required), defines GitLab Server as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix.
+  If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh creates the project during the installation.
+
+  > Important: You must create the group with access to the project prior to the installation.
+
+  Repo URL format:
+  `https://gitlab-onprem.devops.cf-cd.com/<owner_or_group>/<project>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<owner_or_group>` is your username, or if the project is within a group or groups, the name of the group. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy)
+  * `<project>` is the name of the project. For example, `codefresh`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Examples:
+  `https://gitlab-onprem.devops.cf-cd.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner)
+
+  `https://gitlab-onprem.devops.cf-cd.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy)
+
+* `--git-token <git-runtime-token>` (required), is the Git token authenticating access to the Runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)).
+
+
+{::nomarkdown}
+{:/}
+
+#### Bitbucket Cloud
+> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes).
+
+
+`--provider bitbucket --repo <repo_url> --git-user <git_username> --git-token <git-runtime-token>`
+
+where:
+* `--provider bitbucket` (required), defines Bitbucket Cloud as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix.
+  If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh creates the project during Runtime installation.
+  >Important: Remove the username, including @ from the copied URL.
+
+  Repo URL format:
+
+  `https://bitbucket.org/<workspace_id>/<repo_name>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<workspace_id>` is your workspace ID. For example, `nr-codefresh`.
+  * `<repo_name>` is the name of the repository. For example, `codefresh`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Example:
+  `https://bitbucket.org/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod`
+* `--git-user <git_username>` (required), is your username for the Bitbucket Cloud account.
+* `--git-token <git-runtime-token>` (required), is the Git token authenticating access to the runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)).
+
+
+{::nomarkdown}
+{:/}
+
+#### Bitbucket Server
+
+> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes).
+
+
+`--provider bitbucket-server --repo <repo_url> --git-user <git_username> --git-token <git-runtime-token>`
+
+where:
+* `--provider bitbucket-server` (required), defines Bitbucket Server as the Git provider for the Runtime and the account.
+* `--repo <repo_url>` (required), is the `HTTPS` clone URL of the Git repository for the Runtime installation, including the `.git` suffix.
+  If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh then creates the project during the installation.
+  >Important: Remove the username, including @ from the copied URL.
+
+  Repo URL format:
+
+  `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/<owner_or_org>/<repo_name>.git[/subdirectory][?ref=branch]`
+  where:
+  * `<owner_or_org>` is your username or organization name. For example, `codefresh-io`.
+  * `<repo_name>` is the name of the repo. For example, `codefresh`.
+  * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the Runtime is installed in the root of the repository. For example, `/runtimes/defs`.
+  * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the Runtime is installed in the default branch. For example, `codefresh-prod`.
+
+  Example:
+  `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/codefresh-io/codefresh.git/runtimes/defs?ref=codefresh-prod`
+* `--git-user <git_username>` (required), is your username for the Bitbucket Server account.
+* `--git-token <git-runtime-token>` (required), is the Git token authenticating access to the Runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)).
+
+{::nomarkdown}

            +{:/} + +### Codefresh resource flags +**Codefresh demo resources** +Optional. +Install demo pipelines to use as a starting point to create your own GitOps pipelines. We recommend installing the demo resources as these are used in our quick start tutorials. + +* Silent install: Add the `--demo-resources` flag, and define its value as `true` (default), or `false`. For example, `--demo-resources=true` + +**Insecure flag** +For _on-premises installations_, if the Ingress controller does not have a valid SSL certificate, to continue with the installation, add the `--insecure` flag to the installation command. + +{::nomarkdown} +

+{:/}
+
+
+
+
+
+
+
+## (Optional) Internal ingress host configuration for existing Hybrid Runtimes
+If you already have provisioned Hybrid Runtimes, to use an internal ingress host for app-proxy communication and an external ingress host to handle webhooks, change the specs for the `Ingress` and `Runtime` resources in the Runtime installation repository. Use the examples as guidelines.
+
+`/apps/app-proxy/overlays/<runtime-name>/ingress.yaml`: change `host`
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: codefresh-cap-app-proxy
+  namespace: codefresh #replace with your runtime name
+spec:
+  ingressClassName: nginx
+  rules:
+  - host: my-internal-ingress-host # replace with the internal ingress host for app-proxy
+    http:
+      paths:
+      - backend:
+          service:
+            name: cap-app-proxy
+            port:
+              number: 3017
+        path: /app-proxy/
+        pathType: Prefix
+```
+
+`../<runtime-name>/bootstrap/<runtime-name>.yaml`: add `internalIngressHost`
+
+```yaml
+apiVersion: v1
+data:
+  base-url: https://g.codefresh.io
+  runtime: |
+    apiVersion: codefresh.io/v1alpha1
+    kind: Runtime
+    metadata:
+      creationTimestamp: null
+      name: codefresh #replace with your runtime name
+      namespace: codefresh #replace with your runtime name
+    spec:
+      bootstrapSpecifier: github.com/codefresh-io/cli-v2/manifests/argo-cd
+      cluster: https://7DD8390300DCEFDAF87DC5C587EC388C.gr7.us-east-1.eks.amazonaws.com
+      components:
+      - isInternal: false
+        name: events
+        type: kustomize
+        url: github.com/codefresh-io/cli-v2/manifests/argo-events
+        wait: true
+      - isInternal: false
+        name: rollouts
+        type: kustomize
+        url: github.com/codefresh-io/cli-v2/manifests/argo-rollouts
+        wait: false
+      - isInternal: false
+        name: workflows
+        type: kustomize
+        url: github.com/codefresh-io/cli-v2/manifests/argo-workflows
+        wait: false
+      - isInternal: false
+        name: app-proxy
+        type: kustomize
+        url: github.com/codefresh-io/cli-v2/manifests/app-proxy
+        wait: false
+      defVersion: 1.0.1
+      ingressClassName: nginx
+      ingressController: k8s.io/ingress-nginx
+      ingressHost: https://support.cf.com/
+      internalIngressHost: https://my-internal-ingress-host # add this line and replace my-internal-ingress-host with your internal ingress host
+      repo: https://github.com/NimRegev/my-codefresh.git
+      version: 99.99.99
+```
+
+
+## Related articles
+[Add external clusters to Hybrid and Hosted Runtimes]({{site.baseurl}}/docs/installation/managed-cluster/)
+[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/)
+[Add Git Sources to runtimes]({{site.baseurl}}/docs/installation/git-sources/)
+[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration)
+[Troubleshoot Hybrid Runtime installation]({{site.baseurl}}/installation/troubleshooting/runtime-issues/)
diff --git a/_docs/installation/installation-options.md b/_docs/installation/installation-options.md
new file mode 100644
index 00000000..89009723
--- /dev/null
+++ b/_docs/installation/installation-options.md
@@ -0,0 +1,231 @@
+---
+title: "Installation environments"
+description: ""
+group: installation
+toc: true
+---
+To be changed and updated for ProjectOne
+
+The Codefresh platform supports two different installation environments, each with different installation options.
+
+* CI/CD installation environment
+  The CI/CD installation environment is optimized for Continuous Integration/Delivery with Codefresh pipelines. CI pipelines created in Codefresh fetch code from your Git repository, package/compile the code, and deploy the final artifact to a target environment.
+ + The CI/CD installation environment supports these installation options: + * Hybrid, where the Codefresh CI/CD UI runs in the Codefresh cloud, and the builds run on customer premises + * SaaS, a full cloud version that is fully managed by Codefresh + * On-premises, where Codefresh CI/CD runs within the customer datacenter/cloud + + On-premises and Hybrid CI/CD options are available to Enterprise customers looking for a "behind-the-firewall" solution. + +* GitOps installation environment + The GitOps installation environment is a full-featured solution for application deployments and releases. Powered by the Argo Project, Codefresh uses Argo CD, Argo Workflows, Argo Events, and Argo Rollouts, extended with unique functionality and features essential for enterprise deployments. + + GitOps installations support Hosted and Hybrid options. + +## Comparison +Both environments can co-exist giving you the best of both worlds. For + +TBD + + +## Codefresh CI/CD installation options + + + + + + + + + +### Codefresh Cloud CI/CD - likely to be removed + +The Codefresh CI/CD Cloud version is the easiest way to start using Codefresh as it is fully managed and runs 100% on the cloud. Codefresh DevOps handles the maintenance and updates. + +You can also create a [free account]({{site.baseurl}}/docs/getting-started/create-a-codefresh-account/) on the SAAS version right away. The account is forever free with some limitations +on number of builds. + +The cloud version runs on multiple clouds: + +{% include image.html + lightbox="true" + file="/images/installation/codefresh-saas.png" + url="/images/installation/codefresh-saas.png" + alt="sso-diagram.png" + max-width="60%" + %} + +Codefresh Cloud is also compliant with [SOC2 - Type2](https://www.aicpa.org/SOC) showing our commitment to security and availability. + +{% include image.html + lightbox="true" + file="/images/installation/soc2-type2-certified.png" + url="/images/installation/soc2-type2-certified.png" + alt="sso-diagram.png" + max-width="40%" + %} + +The Cloud version has multi-account support with most git providers (GitLab, GitHub, Bitbucket) as well as Azure and Google. + + +### Codefresh Hybrid CI/CD + +The Hybrid CI/CD installation option is for organizations who want their source code to live within their premises, or have other security constraints. For more about the theory and implementation, see [CI/CD behind the firewall installation]({{site.baseurl}}/docs/administration/behind-the-firewall/). + +The UI runs on Codefresh infrastructure, while the builds happen in a Kubernetes cluster in the customer's premises. + +{% include image.html + lightbox="true" + file="/images/installation/hybrid-installation.png" + url="/images/installation/hybrid-installation.png" + alt="sso-diagram.png" + max-width="70%" + %} + + +CI/CD Hybrid installation strikes the perfect balance between security, flexibility, and ease of use. Codefresh still does the heavy lifting for maintaining most of the platform parts. The sensitive data (such as source code and internal services) never leave the premises of the customers. + +With Hybrid CI/CD installation, Codefresh can easily connect to internal [secure services]({{site.baseurl}}/docs/reference/behind-the-firewall/#using-secure-services-in-your-pipelines) that have no public presence. +The UI part is still compliant with Soc2. 
+ + +Here are the security implications of CI/CD Hybrid installation: + +{: .table .table-bordered .table-hover} +| Company Asset | Flow/Storage of data | Comments | +| -------------- | ---------------------------- |-------------------------| +| Source code | Stays behind the firewall | | +| Binary artifacts | Stay behind the firewall | | +| Build logs | Also sent to Codefresh Web application | | +| Pipeline volumes | Stay behind the firewall | | +| Pipeline variables | Defined in Codefresh Web application | | +| Deployment docker images | Stay behind the firewall| Stored on your Docker registry | +| Development docker images | Stay behind the firewall | Stored on your Docker registry| +| Testing docker images | Stay behind the firewall| Stored on your Docker registry | +| Inline pipeline definition | Defined in Codefresh Web application | | +| Pipelines as YAML file | Stay behind the firewall | | +| Test results | Stay behind the firewall | | +| HTML Test reports | Shown on Web application | Stored in your S3 or Google bucket or Azure storage | +| Production database data | Stays behind the firewall | | +| Test database data | Stays behind the firewall | | +| Other services (e.g. Queue, ESB) | Stay behind the firewall | | +| Kubernetes deployment specs | Stay behind the firewall | | +| Helm charts | Stay behind the firewall | | +| Other deployment resources/script (e.g. terraform) | Stay behind the firewall | | +| Shared configuration variables | Defined in Codefresh Web application | | +| Deployment secrets (from git/Puppet/Vault etc) | Stay behind the firewall| | +| Audit logs | Managed via Codefresh Web application | | +| SSO/Idp Configuration | Managed via Codefresh Web application | | +| User emails | Managed via Codefresh Web application | | +| Access control rules | Managed via Codefresh Web application | | + + + +### Codefresh On-premises CI/CD + +For customers who want full control, Codefresh also offers an on-premises option for CI/CD installation. Both the UI and builds run on a Kubernetes cluster fully managed by the customer. + +While Codefresh can still help with maintenance of the CI/CD On-premises, we would recommend the Hybrid CI/CD option first as it offers the most flexibility while maintaining high security. + +### CI/CD installation comparison + +{: .table .table-bordered .table-hover} +| Characteristic | Cloud | Hybrid | On Premise | +| -------------- | ---------------------------- |-------------------------| +| Managed by | Codefresh | Codefresh and Customer | Customer | +| UI runs on | public cloud | public cloud | private cluster | +| Builds run on | public cloud | private cluster | private cluster | +| Access to secure/private services | no | yes | yes | +| Customer maintenance effort | none | some | full | +| Best for | most companies | companies with security constraints | Large scale installations | +| Available to | all customers | [enterprise plans](https://codefresh.io/contact-us/) | [enterprise plans](https://codefresh.io/contact-us/) | + + +## Codefresh GitOps installation options + +Similar to CI/CD installation options, Codefresh GitOps also supports SaaS and hybrid installation options: + + +### Hosted GitOps +The SaaS version of GitOps, has Argo CD installed in the Codefresh cluster. +Hosted GitOps Runtime is installed and provisioned in a Codefresh cluster, and managed by Codefresh. +Hosted enviroments are full-cloud environments, where all updates and improvements are managed by Codefresh, with zero-maintenance overhead for you as the customer. 
Currently, you can add one Hosted GitOps Runtime per account.
+For the architecture, see [Hosted GitOps Runtime architecture]({{site.baseurl}}/docs/installation/architecture/#hosted-gitops-runtime-architecture).
+
+
+{% include
+  image.html
+  lightbox="true"
+  file="/images/runtime/intro-hosted-hosted-initial-view.png"
+  url="/images/runtime/intro-hosted-hosted-initial-view.png"
+  alt="Hosted runtime setup"
+  caption="Hosted runtime setup"
+  max-width="80%"
+%}
+
+  For more information on how to set up the hosted environment, including provisioning hosted runtimes, see [Set up Hosted GitOps]({{site.baseurl}}/docs/installation/hosted-runtime/).
+
+### Hybrid GitOps
+The hybrid version of GitOps has Argo CD installed in the customer's cluster.
+Hybrid GitOps is installed in the customer's cluster, and managed by the customer.
+The Hybrid GitOps Runtime is optimal for organizations with security constraints, wanting to manage CI/CD operations within their premises. Hybrid GitOps strikes the perfect balance between security, flexibility, and ease of use. Codefresh maintains and manages most aspects of the platform, apart from installing and upgrading Hybrid GitOps Runtimes, which are managed by the customer.
+
+
+{% include
+  image.html
+  lightbox="true"
+  file="/images/runtime/runtime-list-view.png"
+  url="/images/runtime/runtime-list-view.png"
+  alt="Runtime List View"
+  caption="Runtime List View"
+  max-width="70%"
+%}
+
+  For more information on hybrid environments, see [Hybrid GitOps runtime requirements]({{site.baseurl}}/docs/installation/hybrid-gitops/#minimum-system-requirements) and [Installing Hybrid GitOps Runtimes]({{site.baseurl}}/docs/installation/hybrid-gitops/).
+
+
+
+
+### Hosted vs. Hybrid GitOps
+
+The table below highlights the main differences between Hosted and Hybrid GitOps.
+ +{: .table .table-bordered .table-hover} +| GitOps Functionality |Feature | Hosted | Hybrid | +| -------------- | -------------- |--------------- | --------------- | +| Runtime | Installation | Provisioned by Codefresh | Provisioned by customer | +| | Runtime cluster | Managed by Codefresh | Managed by customer | +| | Number per account | One runtime | Multiple runtimes | +| | External cluster | Managed by customer | Managed by customer | +| | Upgrade | Managed by Codefresh | Managed by customer | +| | Uninstall | Managed by customer | Managed by customer | +| Argo CD | | Codefresh cluster | Customer cluster | +| CI Ops | Delivery Pipelines |Not supported | Supported | +| |Workflows | Not supported | Supported | +| |Workflow Templates | Not supported | Supported | +| CD Ops |Applications | Supported | Supported | +| |Image enrichment | Supported | Supported | +| | Rollouts | Supported | Supported | +|Integrations | | Supported | Supported | +|Dashboards |Home Analytics | Hosted runtime and deployments|Runtimes, deployments, Delivery Pipelines | +| |DORA metrics | Supported |Supported | +| |Applications | Supported |Supported | + +### Related articles +[Architecture]({{site.baseurl}}/docs/installation/runtime-architecture/) +[Add Git Sources to GitOps Runtimes]({{site.baseurl}}/docs/installation/git-sources/) +[Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration) + diff --git a/_docs/runtime/managed-cluster.md b/_docs/installation/managed-cluster.md similarity index 69% rename from _docs/runtime/managed-cluster.md rename to _docs/installation/managed-cluster.md index 25ae4546..fb010209 100644 --- a/_docs/runtime/managed-cluster.md +++ b/_docs/installation/managed-cluster.md @@ -1,42 +1,42 @@ --- -title: "Add external clusters to runtimes" +title: "Add external clusters to GitOps Runtimes" description: "" -group: runtime +group: installation toc: true --- -Register external clusters to provisioned hybrid or hosted runtimes in Codefresh. Once you add an external cluster, you can deploy applications to that cluster without having to install Argo CD in order to do so. External clusters allow you to manage multiple clusters through a single runtime. +Register external clusters to provisioned Hybrid or Hosted GitOps Runtimes in Codefresh. Once you add an external cluster, you can deploy applications to that cluster without having to install Argo CD in order to do so. Manage manage multiple external clusters through a single Runtime. -When you add an external cluster to a provisioned runtime, the cluster is registered as a managed cluster. A managed cluster is treated as any other managed K8s resource, meaning that you can monitor its health and sync status, deploy applications on the cluster and view information in the Applications dashboard, and remove the cluster from the runtime's managed list. +When you add an external cluster to a provisioned Runtime, the cluster is registered as a managed cluster. A managed cluster is treated as any other managed K8s resource, meaning that you can monitor its health and sync status, deploy applications to it, view information in the Applications dashboard, and remove the cluster from the Runtime's managed list. 
Add managed clusters through: * Codefresh CLI * Kustomize -Adding a managed cluster via Codefresh ensures that Codefresh applies the required RBAC resources (`ServiceAccount`, `ClusterRole` and `ClusterRoleBinding`) to the target cluster, creates a `Job` that updates the selected runtime with the information, registers the cluster in Argo CD as a managed cluster, and updates the platform with the new cluster information. +Adding a managed cluster via Codefresh ensures that Codefresh applies the required RBAC resources (`ServiceAccount`, `ClusterRole` and `ClusterRoleBinding`) to the target cluster, creates a `Job` that updates the selected Runtime with the information, registers the cluster in Argo CD as a managed cluster, and updates the platform with the new cluster information. -### Add a managed cluster with Codefresh CLI -Add an external cluster to a provisioned runtime through the Codefresh CLI. When adding the cluster, you can also add labels and annotations to the cluster, which are added to the cluster secret created by Argo CD. +## Add a managed cluster with Codefresh CLI +Add an external cluster to a provisioned GitOps Runtime through the Codefresh CLI. When adding the cluster, you can also add labels and annotations to the cluster, which are added to the cluster secret created by Argo CD. Optionally, to first generate the YAML manifests, and then manually apply them, use the `dry-run` flag in the CLI. **Before you begin** - -* For _hosted_ runtimes: [Configure access to these IP addresses]({{site.baseurl}}/docs/administration/platform-ip-addresses/) +* For _Hosted_ Runtimes: [Configure access to these IP addresses]({{site.baseurl}}/docs/administration/platform-ip-addresses/) * Verify that: - * Your Git personal access token is valid and has the correct permissions - * You have installed the latest version of the Codefresh CLI + * Your Git personal access token is valid and has the [required scopes]({{site.baseurl}}/docs/reference/git-tokens) + * You have installed the [latest version of the Codefresh CLI]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/#hybrid-gitops-upgrade-gitops-cli) **How to** -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. From either the **Topology** or **List** views, select the runtime to which to add the cluster. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. From either the **Topology** or **List** views, select the Runtime to which to add the cluster. 1. Topology View: Select {::nomarkdown}{:/}. List View: Select the **Managed Clusters** tab, and then select **+ Add Cluster**. 1. In the Add Managed Cluster panel, copy and run the command: - `cf cluster add [--labels label-key=label-value] [--annotations annotation-key=annotation-value][--dry-run]` + `cf cluster add [runtime-name] [--labels label-key=label-value] [--annotations annotation-key=annotation-value][--dry-run]` where: + * `runtime-name` is the name of the Runtime to which to add the cluster. * `--labels` is optional, and required to add labels to the cluster. When defined, add a label in the format `label-key=label-value`. Separate multiple labels with `commas`. * `--annotations` is optional, and required to add annotations to the cluster. When defined, add an annotation in the format `annotation-key=annotation-value`. Separate multiple annotations with `commas`. 
* `--dry-run` is optional, and required if you want to generate a list of YAML manifests that you can redirect and apply manually with `kubectl`. @@ -54,7 +54,7 @@ Optionally, to first generate the YAML manifests, and then manually apply them, {:start="5"} 1. If you used `dry-run`, apply the generated manifests to the same target cluster on which you ran the command. - Here is an example of the YAML manifest generated with the `--dry-run` flag. Note that there are placeholders in the example, which are replaced with the actual values with `--dry-run`. + Here is an example of the YAML manifest generated with the `--dry-run` flag. Note that the example has placeholders, which are replaced with the actual values during the `--dry-run`. ```yaml @@ -177,9 +177,9 @@ spec: ``` -The new cluster is registered to the runtime as a managed cluster. +The new cluster is registered to the Runtime as a managed cluster. -### Add a managed cluster with Kustomize +## Add a managed cluster with Kustomize Create a `kustomization.yaml` file with the information shown in the example below, and run `kustomize build` on it. ```yaml @@ -222,16 +222,20 @@ resources: ``` -### Work with managed clusters -Work with managed clusters in hybrid or hosted runtimes in either the Topology or List runtime views. For information on runtime views, see [Runtime views]({{site.baseurl}}/docs/runtime/runtime-views). -As the cluster is managed through the runtime, updates to the runtime automatically updates the components on all the managed clusters that include it. +## Work with managed clusters +Work with managed clusters in either the Topology or List Runtime views. For information on Runtime views, see [GitOps Runtime views]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/#gitops-runtime-views). +As the cluster is managed through the Runtime, updates to the Runtime automatically update the components on all the managed clusters that include it. View connection status for the managed cluster, and health and sync errors. Health and sync errors are flagged by the error notification in the toolbar, and visually flagged in the List and Topology views. -#### Install Argo Rollouts -Install Argo Rollouts directly from Codefresh with a single click to visualize rollout progress in the [Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/). If Argo Rollouts has not been installed, an **Install Argo Rollouts** button is displayed on selecting the managed cluster. +### Install Argo Rollouts +Applications with `rollout` resources need Argo Rollouts on the target cluster, both to visualize rollouts in the Applications dashboard and to control rollout steps with the Rollout Player (see the sample `Rollout` sketch after the steps below). +If Argo Rollouts has not been installed on the target cluster, selecting the cluster displays the **Install Argo Rollouts** button. + +Install Argo Rollouts with a single click to execute rollout instructions, deploy the application, and visualize rollout progress in the [Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/). + -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. 1. Select **Topology View**. 1. Select the target cluster, and then select **+ Install Argo Rollouts**. 
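For reference, the snippet below is a minimal sketch of the kind of `rollout` resource that requires Argo Rollouts on the target cluster. The application name, image, and canary steps are illustrative placeholders, not values taken from this documentation.

```yaml
# Illustrative canary Rollout; all names and values are placeholders.
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: example-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: example-app
          image: nginx:1.25          # placeholder image
          ports:
            - containerPort: 80
  strategy:
    canary:
      steps:
        - setWeight: 25              # shift 25% of replicas/traffic to the new version
        - pause: {}                  # wait for promotion, for example from the Rollout Player
        - setWeight: 100
```

Once Argo Rollouts is installed on the cluster, applications that include a resource like this report their rollout progress to the Applications dashboard, where the Rollout Player can control the individual steps.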
@@ -246,16 +250,16 @@ Install Argo Rollouts directly from Codefresh with a single click to visualize r %} -#### Remove a managed cluster from the Codefresh UI -Remove a cluster from the runtime's list of managed clusters from the Codefresh UI. +### Remove a managed cluster from the Codefresh UI +Remove a cluster from the Runtime's list of managed clusters from the Codefresh UI. > You can also remove it through the CLI. -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. 1. Select either the **Topology View** or the **List View** tabs. 1. Do one of the following: - * In the Topology View, select the cluster node from the runtime it is registered to. - * In the List View, select the runtime, and then select the **Managed Clusters** tab. + * In the Topology View, select the cluster node from the Runtime it is registered to. + * In the List View, select the Runtime, and then select the **Managed Clusters** tab. 1. Select the three dots next to the cluster name, and then select **Uninstall** (Topology View) or **Remove** (List View). {% include @@ -269,8 +273,8 @@ Remove a cluster from the runtime's list of managed clusters from the Codefresh %} -#### Remove a managed cluster through the Codefresh CLI -Remove a cluster from the list managed by the runtime, through the CLI. +### Remove a managed cluster through the Codefresh CLI +Remove a cluster from the list managed by the Runtime, through the CLI. * Run: `cf cluster remove --server-url ` @@ -279,7 +283,6 @@ Remove a cluster from the list managed by the runtime, through the CLI. `` is the URL of the server on which the managed cluster is installed. -### Related articles -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Manage provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[(Hybrid) Monitor provisioned runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) \ No newline at end of file +## Related articles +[Add Git Sources to GitOps Runtimes]({{site.baseurl}}/docs/installation/git-sources/) +[Monitoring & managing GitOps Runtimes]({{site.baseurl}}/docs/installation/monitor-manage-runtimes/) diff --git a/_docs/installation/monitor-manage-runtimes.md b/_docs/installation/monitor-manage-runtimes.md new file mode 100644 index 00000000..08267a95 --- /dev/null +++ b/_docs/installation/monitor-manage-runtimes.md @@ -0,0 +1,643 @@ +--- +title: "Monitoring & managing GitOps Runtimes" +description: "" +group: runtime +redirect_from: + - /monitor-manage-runtimes/ + - /monitor-manage-runtimes +toc: true +--- + + +The **Runtimes** page displays the provisioned GitOps Runtimes in your account, both Hybrid, and the Hosted Runtime if you have one. + +View Runtime components and information in List or Topology view formats to monitor and manage them. 
+ +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view.png" + url="/images/runtime/runtime-list-view.png" + alt="Runtime List View" + caption="Runtime List View" + max-width="70%" +%} + +Monitor provisioned GitOps Runtimes for security, health, and sync errors: + +* (Hybrid and Hosted) View/download logs for Runtimes and for Runtime components +* (Hybrid) Restore provisioned Runtimes +* (Hybrid) Configure browsers to allow access to insecure Runtimes +* (Hybrid) Monitor notifications in the Activity Log + + +Manage provisioned GitOps Runtimes: +* [Add managed clusters to GitOps Runtimes]({{site.baseurl}}/docs/installation/managed-cluster/) +* [Add and manage Git Sources for GitOps Runtimes]({{site.baseurl}}/docs/installation/git-sources/) +* Upgrade GitOps CLI +* Upgrade Hybrid GitOps Runtimes +* Uninstall GitOps Runtimes + + + +> Unless specified otherwise, all options are common to both types of GitOps Runtimes. If an option is valid only for Hybrid GitOps, it is indicated as such. + + +## GitOps Runtime views + +View provisioned GitOps Runtimes in List or Topology view formats. + +* List view: The default view, displays the list of provisioned Runtimes, the clusters managed by them, and Git Sources associated with them. +* Topology view: Displays a hierarchical view of Runtimes and the clusters managed by them, with health and sync status of each cluster. + +### List view + +The List view is a grid-view of the provisioned Runtimes. + +Here is an example of the List view for Runtimes. +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view.png" + url="/images/runtime/runtime-list-view.png" + alt="Runtime List View" + caption="Runtime List View" + max-width="70%" +%} + +Here is a description of the information in the List View. + +{: .table .table-bordered .table-hover} +| List View Item| Description | +| -------------- | ---------------- | +|**Name**| The name of the provisioned GitOps Runtime. | +|**Type**| The type of GitOps Runtime provisioned, either **Hybrid** or **Hosted**. | +|**Cluster/Namespace**| The K8s API server endpoint, as well as the namespace with the cluster. | +|**Modules**| The modules installed based on the type of provisioned Runtime. Hybrid Runtimes include CI and CD Ops modules. Hosted Runtimes include CD Ops. | +|**Managed Cluster**| The number of managed clusters, if any, for the runtime. To view the list of managed clusters, select the runtime, and then the **Managed Clusters** tab. To work with managed clusters, see [Adding external clusters to GitOps Runtimes]({{site.baseurl}}/docs/installation/managed-cluster/).| +|**Version**| The version of the runtime currently installed. **Update Available!** indicates there are later versions of the runtime. To see all the commits to the runtime, mouse over **Update Available!**, and select **View Complete Change Log**.| +|**Last Updated**| The most recent update information from the runtime to the Codefresh platform. Updates are sent to the platform typically every few minutes. Longer update intervals may indicate networking issues.| +|**Sync Status**| The health and sync status of the runtime or cluster. {::nomarkdown}
            • indicates health or sync errors in the runtime, or a managed cluster if one was added to the runtime.
              The runtime name is colored red.
            • indicates that the runtime is being synced to the cluster on which it is provisioned.
            {:/} | + +### Topology view + +A hierarchical visualization of the provisioned Runtimes. The Topology view makes it easy to identify key information such as versions, health and sync status, for both the provisioned Runtime and the clusters managed by it. +Here is an example of the Topology view for Runtimes. + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-topology-view.png" + url="/images/runtime/runtime-topology-view.png" + alt="Runtime Topology View" + caption="Runtime Topology View" + max-width="30%" +%} + +Here is a description of the information in the Topology view. + +{: .table .table-bordered .table-hover} +| Topology View Item | Description | +| ------------------------| ---------------- | +|**Runtime** | ![](../../../images/icons/codefresh-runtime.png?display=inline-block) the provisioned Runtime. Hybrid Runtimes display the name of the K8s API server endpoint with the cluster. Hosted Runtimes display 'hosted'. | +|**Cluster** | The local cluster, and managed clusters if any, for the Runtime. {::nomarkdown}
            • indicates the local cluster, always displayed as `in-cluster`. The in-cluster server URL is always set to `https://kubernetes.default.svc/`.
            • indicates a managed cluster.
            • select to add a new managed cluster.
            {:/} To view cluster components, select the cluster. To add and work with managed clusters, see [Adding external clusters to GitOps Runtimes]({{site.baseurl}}/docs/installation/managed-cluster/). | +|**Health/Sync status** |The health and sync status of the Runtime or cluster. {::nomarkdown}
            • indicates health or sync errors in the Runtime, or a managed cluster if one was added to the runtime.
              The runtime or cluster node is bordered in red and the name is colored red.
            • indicates that the Runtime is being synced to the cluster on which it is provisioned.
            {:/} | +|**Search and View options** | {::nomarkdown}
            • Find a Runtime or its clusters by typing part of the Runtime/cluster name, and then navigate to the entries found.
            • Topology view options: Resize to window, zoom in, zoom out, full screen view.
            {:/}| + +## Managing provisioned GitOps Runtimes +* [Reset shared configuration repository for GitOps Runtimes](#reset-shared-configuration-repository-for-gitpps-runtimes) +* [(Hybrid GitOps) Upgrade GitOps CLI](#hybrid-gitops-upgrade-gitops-cli) +* [(Hybrid GitOps) Upgrade provisioned Runtimes](#hybrid-gitops-upgrade-provisioned-runtimes) +* [Uninstall provisioned GitOps Runtimes](#uninstall-provisioned-gitops-runtimes) +* [Update Git tokens for Runtimes](#update-git-tokens-for-runtimes) + +### Reset shared configuration repository for GitOps Runtimes +Codefresh creates the [shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration) when you install the first hybrid or hosted GitOps runtime for your account, and uses it for all runtimes you add to the same account. + +If needed, you can reset the location of the shared configuration repository in your account and re-initialize it. For example, when moving from evaluation to production. +Uninstall all the existing runtimes in your account, and then run the reset command. On the next installation, Codefresh re-initializes the shared configuration repo. + +**Before you begin** +[Uninstall every runtime in the account](#uninstall-provisioned-gitops-runtimes) + +**How to** +* Run: + `cf config --reset-shared-config-repo` + +### (Hybrid GitOps) Upgrade GitOps CLI +Upgrade the CLI to the latest version to prevent Runtime installation errors. + +1. Check the version of the CLI you have installed: + `cf version` +1. Compare with the [latest version](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} released by Codefresh. +1. Select and run the appropriate command: + +{: .table .table-bordered .table-hover} +| Download mode | OS | Commands | +| -------------- | ----------| ----------| +| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| +| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | +| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | +| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| +| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`| + +### (Hybrid GitOps) Upgrade provisioned Runtimes + +Upgrade provisioned Hybrid Runtimes to install critical security updates or the latest versions of all components. Upgrade a provisioned Hybrid Runtime by running a silent upgrade or through the CLI wizard. +If you have managed clusters for the Hybrid Runtime, upgrading the Runtime automatically updates runtime components within the managed cluster as well. + +> When there are security updates, the UI displays the alert, _At least one runtime requires a security update_. The Version column displays an _Update Required!_ notification. + +> If you have older Hybrid Runtime versions, upgrade to manually define or create the shared configuration repo for your account. See [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/). 
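For example, a silent upgrade that also explicitly sets the shared configuration repo might look like the following; the values in angle brackets are placeholders, and combining `--shared-config-repo` with `--silent` in a single command is an assumption based on the flags described in this section, so verify the exact syntax against your CLI version:

  `cf runtime upgrade --git-token <git-token> --shared-config-repo <shared-config-repo> --silent`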
+ + +**Before you begin** +For both silent or CLI-wizard based upgrades, make sure you have: + +* The latest version of the Codefresh CLI + Run `cf version` to see your version and [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} to compare with the latest CLI version. +* A valid Git token with [the required scopes]({{site.baseurl}}/docs/reference/git-tokens) + +**Silent upgrade** + +* Pass the mandatory flags in the upgrade command: + + `cf runtime upgrade --git-token --silent` + where: + `` is a valid Git token with the correct scopes. + +**CLI wizard-based upgrade** + +1. In the Codefresh UI, make sure you are in [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. Switch to either the **List View** or to the **Topology View**. +1. **List view**: + * Select the Runtime name. + * To see all the commits to the Runtime, in the Version column, mouse over **Update Available!**, and select **View Complete Change Log**. + * On the top-right, select **Upgrade**. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-list-view-upgrade.png" + url="/images/runtime/runtime-list-view-upgrade.png" + alt="List View: Upgrade runtime option" + caption="List View: Upgrade runtime option" + max-width="30%" + %} + + **Topology view**: + Select the Runtime cluster, and from the panel, select the three dots and then select **Upgrade Runtime**. + {% include + image.html + lightbox="true" + file="/images/runtime/runtiime-topology-upgrade.png" + url="/images/runtime/runtiime-topology-upgrade.png" + alt="Topology View: Upgrade runtime option" + caption="Topology View: Upgrade runtime option" + max-width="30%" +%} + +{:start="4"} + +1. If you have already installed the Codefresh CLI, in the Install Upgrades panel, copy the upgrade command. + + {% include + image.html + lightbox="true" + file="/images/runtime/install-upgrades.png" + url="/images/runtime/install-upgrades.png" + alt="Upgrade runtime" + caption="Upgrade runtime panel" + max-width="30%" +%} + +{:start="5"} +1. In your terminal, paste the command, and do the following: + * Update the Git token value. + * To manually define the shared configuration repo, add the `--shared-config-repo` flag with the path to the repo. +1. Confirm to start the upgrade. + + + + + + +### Uninstall provisioned GitOps Runtimes + +Uninstall provisioned GitOps Runtimes that are not in use. Uninstall a Runtime through a silent uninstall or through the CLI wizard. +> Uninstalling a Runtime removes the Git Sources and managed clusters associated with it. + +**Before you begin** +For both types of uninstalls, make sure you have: + +* The latest version of the GitOps CLI +* A valid runtime Git token +* The Kube context from which to uninstall the provisioned Runtime + +**Silent uninstall** +Pass the mandatory flags in the uninstall command: + `cf runtime uninstall --git-token --silent` + where: + `--git-token` is a valid runtime token with the `repo` and `admin-repo.hook` scopes. + +**CLI wizard uninstall** + +1. In the Codefresh UI, make sure you are in [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. Switch to either the **List View** or to the **Topology View**. +1. **List view**: On the top-right, select the three dots and then select **Uninstall**. 
+ + {% include + image.html + lightbox="true" + file="/images/runtime/uninstall-location.png" + url="/images/runtime/uninstall-location.png" + alt="List View: Uninstall runtime option" + caption="List View: Uninstall runtime option" + max-width="30%" +%} + +**Topology view**: Select the Runtime node, and from the panel, select the three dots and then select **Uninstall Runtime**. + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-topology-uninstall.png" + url="/images/runtime/runtime-topology-uninstall.png" + alt="Topology View: Uninstall runtime option" + caption="Topology View: Uninstall runtime option" + max-width="30%" +%} + +{:start="4"} + +1. If you already have the latest version of the Codefresh CLI, in the Uninstall Codefresh Runtime panel, copy the uninstall command. + + {% include + image.html + lightbox="true" + file="/images/runtime/uninstall.png" + url="/images/runtime/uninstall.png" + alt="Uninstall Codefresh runtime" + caption="Uninstall Codefresh runtime" + max-width="40%" +%} + +{:start="5"} + +1. In your terminal, paste the command, and update the Git token value. +1. Select the Kube context from which to uninstall the Runtime, and then confirm the uninstall. +1. If you get errors, run the uninstall command again, with the `--force` flag. + + + +### Update Git tokens for Runtimes + +Provisioned Runtimes require valid Git tokens at all times to authenticate Git actions by you as a user. +>These tokens are specific to the user, and the same can be used for multiple runtimes. + +There are two different situations when you need to update Git tokens: +* Update invalid, revoked, or expired tokens: Codefresh automatically flags Runtimes with such tokens. It is mandatory to update the Git tokens to continue working with the platform. +* Update valid tokens: Optional. You may want to update Git tokens, even valid ones, by deleting the existing token and replacing it with a new token. + +The methods for updating any Git token are the same regardless of the reason for the update: +* OAuth2 authorization, if your admin has registered an OAuth Application for Codefresh +* Git access token authentication, by generating a personal access token in your Git provider account with the correct scopes + +**Before you begin** +* To authenticate through a Git access token, make sure your token is valid and has [the required scopes]({{site.baseurl}}/docs/reference/git-tokens) + +**How to** +1. Do one of the following: + * If you see a notification in the Codefresh UI about invalid Runtime tokens, click **[Update Token]**. + The Runtimes page shows Runtimes with invalid tokens prefixed by the key icon. Mouse over shows invalid token. + * To update an existing token, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. Select the Runtime for which to update the Git token. +1. From the context menu with the additional actions at the top-right, select **Update Git Runtime token**. + + {% include + image.html + lightbox="true" + file="/images/runtime/update-git-runtime-token.png" + url="/images/runtime/update-git-runtime-token.png" + alt="Update Git runtime token option" + caption="Update Git runtime token option" + max-width="40%" +%} + +{:start="4"} +1. Do one of the following: + * If your admin has set up OAuth access, click **Authorize Access to Git Provider**. Go to _step 5_. + * Alternatively, authenticate with an access token from your Git provider. Go to _step 6_. + +{:start="5"} +1. 
For OAuth2 authorization: + > If the application is not registered, you get an error. Contact your admin for help. + * Enter your credentials, and select **Sign In**. + * If required, as for example if two-factor authentication is configured, complete the verification. + + {% include + image.html + lightbox="true" + file="/images/administration/user-settings/oauth-user-authentication.png" + url="/images/administration/user-settings/oauth-user-authentication.png" + alt="Authorizing access with OAuth2" + caption="Authorizing access with OAuth2" + max-width="30%" + %} + +{:start="6"} +1. For Git token authentication, expand **Advanced authorization options**, and then paste the generated token in the **Git runtime token** field. + +1. Click **Update Token**. + +## Monitoring GitOps Runtimes +* [View/download logs to troubleshoot Runtimes](#viewdownload-logs-to-troubleshoot-runtimes) +* [(Hybrid GitOps) Restoring provisioned Runtimes](#hybrid-gitops-restoring-provisioned-runtimes) +* [(Hybrid GitOps) Configure browser to allow insecure Runtimes](#hybrid-gitops-configure-browser-to-allow-insecure-runtimes) +* [(Hybrid GitOps) View notifications in Activity Log](#hybrid-gitops-view-notifications-in-activity-log) +* [(Hybrid GitOps) Troubleshoot health and sync errors for Runtimes](#hybrid-gitops-troubleshoot-health-and-sync-errors-for-runtimes) + +### View/download logs to troubleshoot Runtimes +Logs are available for completed Runtimes, both for the Runtime and for individual Runtime components. Download log files for offline viewing and analysis, or view online logs for a Runtime component, and download if needed for offline analysis. Online logs support free-text search, search-result navigation, and line-wrap for enhanced readability. + +Log files include events from the date of the application launch, with the newest events listed first. + +{::nomarkdown} +

            +{:/} + +#### Download logs for Runtimes +Download the log file for a Runtime. The Runtime log is downloaded as a `.tar.gz` file, which contains the individual log files for each runtime component. + +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. If needed, switch to **List View**, and then select the runtime for which to download logs. +1. From the context menu, select **Download All Logs**. + The log file is downloaded to the Downloads folder or the folder designated for downloads, with the filename, `.tar.gz`. For example, `codefreshv2-production2.tar.gz`. + + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-download-all.png" + url="/images/runtime/runtime-logs-download-all.png" + alt="Download logs for selected runtime" + caption="Download logs for selected runtime" + max-width="40%" +%} + + +{:start="4"} +1. To view the log files of the individual components, unzip the file. + Here is an example of the folder with the individual logs. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-folder-view.png" + url="/images/runtime/runtime-logs-folder-view.png" + alt="Individual log files in folder" + caption="Individual log files in folder" + max-width="50%" +%} + +{:start="5"} +1. Open a log file with the text editor of your choice. + +{::nomarkdown} +

            +{:/} + +#### View/download logs for Runtime components +View online logs for any Runtime component, and if needed, download the log file for offline viewing and analysis. + +Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events, from the application launch to the date and time of download. + +1. In the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. +1. If needed, switch to **List View**, and then select the Runtime. +1. Select the Runtime component and then select **View Logs**. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-view-component.png" + url="/images/runtime/runtime-logs-view-component.png" + alt="View log option for individual runtime component" + caption="View log option for individual runtime component" + max-width="40%" +%} + + +{:start="4"} +1. Do the following: + * Search by free-text for any string, and click the next and previous buttons to navigate between the search results. + * To switch on line-wrap for readability, click **Wrap**. + + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-logs-screen-view.png" + url="/images/runtime/runtime-logs-screen-view.png" + alt="Runtime component log example" + caption="Runtime component log example" + max-width="50%" +%} + +{:start="5"} +1. To download the log, click **Download**. + The file is downloaded as `.log`. + +### (Hybrid GitOps) Restoring provisioned Runtimes + +In case of cluster failure, restore the provisioned Hybrid Runtime from the existing runtime installation repository. +For partial or complete cluster failures, you can restore the Runtime to either the failed cluster or to a different cluster. +Restoring the provisioned Runtime reinstalls it, leveraging the resources in the existing Runtime repo. + +Restoring the runtime: +* Applies `argo-cd` from the installation manifests in your repo to your cluster +* Associates `argo-cd` with the existing installation repo +* Applies the Runtime and `argo-cd` secrets to the cluster +* Updates the Runtime config map (`.yaml` in the `bootstrap` directory) with the new cluster configuration for these fields: + `cluster` + `ingressClassName` + `ingressController` + `ingressHost` + +{::nomarkdown} +

            +{:/} + +#### Restore a Hybrid Runtime +Reinstall the Hybrid Runtime from the existing installation repository to restore it to the same or a different cluster. + +**Before you begin** + +* Have the following information handy: + > All values must be identical to the Runtime to be restored. + * Runtime name + * Repository URL + * Codefresh context + * Kube context: Required if you are restoring to the same cluster + +**How to** + +1. Run: + `cf runtime install --from-repo` +1. Provide the relevant values when prompted. +1. If you are performing the runtime recovery in a different cluster, verify the ingress resource configuration for `app-proxy`, `workflows`, and `default-git-source`. + If the health status remains as `Progressing`, do the following: + + * In the Runtime installation repo, check if the `ingress.yaml` files for the `app-proxy` and `workflows` are configured with the correct `host` and `ingressClassName`: + + `apps/app-proxy/overlays/<runtime-name>/ingress.yaml` + `apps/workflows/overlays/<runtime-name>/ingress.yaml` + + * In the Git Source repository, check the `host` and `ingressClassName` in `cdp-default-git-source.ingress.yaml`: + + `resources_<runtime-name>/cdp-default-git-source.ingress.yaml` + + See the [example](#ingress-example) below. + +{:start="4"} +1. If you have managed clusters registered to the hybrid runtime you are restoring, reconnect them. + Run the command and follow the instructions in the wizard: + `cf cluster add` + +1. Verify that you have a registered Git integration: + `cf integration git list --runtime <runtime-name>` + +1. If needed, create a new Git integration: + `cf integration git add default --runtime <runtime-name> --provider github --api-url https://api.github.com` + +{::nomarkdown} +

            +{:/} + +#### Ingress example +This is an example of the `ingress.yaml` for `workflows`. + + ```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + ingress.kubernetes.io/protocol: https + ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/backend-protocol: https + nginx.ingress.kubernetes.io/rewrite-target: /$2 + creationTimestamp: null + name: runtime-name-workflows-ingress + namespace: runtime-name +spec: + ingressClassName: nginx + rules: + - host: your-ingress-host.com + http: + paths: + - backend: + service: + name: argo-server + port: + number: 2746 + path: /workflows(/|$)(.*) + pathType: ImplementationSpecific +status: + loadBalancer: {} +``` + + +### (Hybrid GitOps) Configure browser to allow insecure Runtimes + +If at least one of your Hybrid Runtimes was installed in insecure mode (without an SSL certificate for the ingress controller from a CA), the UI alerts you that _At least one runtime was installed in insecure mode_. +{% include + image.html + lightbox="true" + file="/images/runtime/runtime-insecure-alert.png" + url="/images/runtime/runtime-insecure-alert.png" + alt="Insecure runtime installation alert" + caption="Insecure runtime installation alert" + max-width="100%" +%} + +All you need to do is to configure the browser to trust the URL and receive content. + +1. Select **View Runtimes** to the right of the alert. + You are taken to the Runtimes page, where you can see insecure Runtimes tagged as **Allow Insecure**. + {% include + image.html + lightbox="true" + file="/images/runtime/runtime-insecure-steps.png" + url="/images/runtime/runtime-insecure-steps.png" + alt="Insecure runtimes in Runtime page" + caption="Insecure runtimes in Runtime page" + max-width="40%" +%} +{:start="2"} +1. For _every_ insecure Runtime, select **Allow Insecure**, and when the browser prompts you to allow access, do as relevant: + +* Chrome: Click **Advanced** and then **Proceed to site**. +* Firefox: Click **Advanced** and then **Accept the risk and continue**. +* Safari: Click **Show Certificate**, and then select **Always allow content from site**. +* Edge: Click **Advanced**, and then select **Continue to site(unsafe)**. + +### (Hybrid GitOps) View notifications in Activity Log + +The Activity Log is a quick way to monitor notifications for Runtime events such as upgrades. A pull-down panel in the Codefresh toolbar, the Activity Log shows ongoing, success, and error notifications, sorted by date, starting with today's date. + +1. In the Codefresh UI, on the top-right of the toolbar, select ![](/images/pipeline/monitoring/pipeline-activity-log-toolbar.png?display=inline-block) **Activity Log**. +1. To see notifications for provisioned Runtimes, filter by **Runtime**. + + {% include image.html + lightbox="true" + file="/images/runtime/runtime-activity-log.png" + url="/images/runtime/runtime-activity-log.png" + alt="Activity Log filtered by Runtime events" + caption="Activity Log filtered by Runtime events" + max-width="30%" + %} + +{:start="3"} + +1. To see more information on an error, select the **+** sign. + +### (Hybrid GitOps) Troubleshoot health and sync errors for Runtimes +The ![](/images/icons/error.png?display=inline-block) icon with the Runtime in red indicates either health or sync errors. + +**Health errors** +Health errors are generated by Argo CD and by Codefresh for Runtime components. + +**Sync errors** +Runtimes with sync errors display an **Out of sync** status in Sync Status column. 
They are related to discrepancies between the desired and actual state of a Runtime component or one of the Git sources associated with the Runtime. + +**View errors** +For both views, select the Runtime, and then select **Errors Detected**. +Here is an example of health errors for a Runtime. + + {% include image.html + lightbox="true" + file="/images/runtime/runtime-health-sync-errors.png" + url="/images/runtime/runtime-health-sync-errors.png" + alt="Health errors for runtime example" + caption="Health errors for runtime example" + max-width="30%" + %} + + +### Related articles +[Add Git Sources to GitOps Runtimes]({{site.baseurl}}/docs/installation/git-sources/) +[Add external clusters to GitOps Runtimes]({{site.baseurl}}/docs/installation/managed-cluster/) +[Shared configuration repo for GitOps Runtimes]({{site.baseurl}}/docs/reference/shared-configuration) + + diff --git a/_docs/installation/runtime-architecture.md b/_docs/installation/runtime-architecture.md new file mode 100644 index 00000000..1e0841d2 --- /dev/null +++ b/_docs/installation/runtime-architecture.md @@ -0,0 +1,240 @@ +--- +title: "Runtime architectures" +description: "" +group: installation +toc: true +--- + +Overview TBD + +## Codefresh CI/CD architecture + +The most important components are the following: + +**Codefresh VPC:** All internal Codefresh services run in the VPC (analyzed in the next section). Codefresh uses Mongo and PostgreSQL to store user and authentication information. + +**Pipeline execution environment**: The Codefresh engine component is responsible for taking pipeline definitions and running them in managed Kubernetes clusters by automatically launching the Docker containers that each pipeline needs for its steps. + +**External actors**. Codefresh offers a [public API]({{site.baseurl}}/docs/integrations/ci-integrations/codefresh-api/) that is consumed both by the Web user interface and the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"}. The API is also available for any custom integration with external tools or services. + +### CI/CD topology + +If we zoom into Codefresh Services for CI/CD, we will see the following: + +{% include image.html + lightbox="true" + file="/images/installation/topology-new.png" + url="/images/installation/topology-new.png" + alt="Topology diagram" + caption="Topology diagram (click to enlarge)" + max-width="100%" + %} + +### CI/CD core components + +{: .table .table-bordered .table-hover} +|Category | Component | Function | +| -------------- | ----------| ----------| +| Core | **pipeline-manager**| Manages all CRUD operations for CI pipelines.| +| | **cfsign** | Signs server TLS certificates for docker daemons, and generates client TLS certificates for hybrid pipelines. | +| | **cf-api** | Central back-end component that functions as an API gateway for other services, and handles authentication/authorization. | +| | **context-manager**| Manages the authentications/configurations used by Codefresh CI/CD and by the Codefresh engine. | +| | **runtime-environment-manager**| Manages the different runtime environments for CI pipelines. The runtime environment for CI/CD SaaS is fully managed by Codefresh. For CI/CD Hybrid, customers can add their own runtime environments using private Kubernetes clusters. | +| Trigger | **hermes**| Controls CI pipeline trigger management. See [triggers]({{site.baseurl}}/docs/pipelines/triggers/). 
| +| | **nomios**| Enables triggers from Docker Hub when a new image/tag is pushed.See [Triggers from Docker Hub]({{site.baseurl}}/docs/pipelines/triggers/dockerhub-triggers/). | +| | **cronus**| Enables defining Cron triggers for CI pipelines. See [Cron triggers]({{site.baseurl}}/docs/pipelines/triggers/cron-triggers/).| +| Log | **cf-broadcaster**| Stores build logs from CI pipelines. The UI and CLI stream logs by accessing the **cf-broadcaster** through a web socket. | +| Kubernetes | **cluster-providers** | Provides an interface to define cluster contexts to connect Kubernetes clusters in CI/CD installation environments. | +| | **helm-repo-manager** | Manages the Helm charts for CI/CD installation environments through the Helm repository admin API and ChartMuseum proxy. See [Helm charts in Codefresh]({{site.baseurl}}/docs/deployments/helm/managed-helm-repository/). | +| | **k8s-monitor** | The agent installed on every Kubernetes cluster, providing information for the Kubernetes dashboards. See [Kubernetes dashboards]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/). | +| |**charts-manager** | Models the Helm chart view in Codefresh. See [Helm chart view]({{site.baseurl}}/docs/deployments/helm/helm-releases-management/). | +| | **kube-integration** | Provides an interface to retrieve required information from a Kubernetes cluster, can be run either as an http server or an NPM module. | +| | **tasker-kubernetes** | Provides cache storage for Kubernetes dashboards. See [Kubernetes dashboards]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/). | + + +## Codefresh GitOps Platform architecture + +The diagram shows a high-level view of the Codefresh GitOps installation environment, and its core components, the Codefresh Control Plane, the Codefresh Runtime, and the Codefresh Clients. + +{% include +image.html +lightbox="true" +file="/images/getting-started/architecture/arch-codefresh-simple.png" +url="/images/getting-started/architecture/arch-codefresh-simple.png" +alt="Codefresh GitOps Platform architecture" +caption="Codefresh GitOps Platform architecture" +max-width="100%" +%} + +{::nomarkdown} +
            +{:/} + +### Codefresh GitOps Control Plane +The Codefresh Control Plane is the SaaS component in the platform. External to the enterprise firewall, it does not have direct communication with the Codefresh Runtime, Codefresh Clients, or the customer's organizational systems. The Codefresh Runtime and the Codefresh Clients communicate with the Codefresh Control Plane to retrieve the required information. + + +{::nomarkdown} +
            +{:/} + +### Codefresh GitOps Runtime +The Codefresh Runtime is installed on a Kubernetes cluster, and houses the enterprise distribution of the Codefresh Application Proxy and the Argo Project. +Depending on the type of GitOps installation, the Codefresh Runtime is installed either in the Codefresh platform (Hosted GitOps), or in the customer environment (Hybrid GitOps). Read more in [Codefresh GitOps Runtime architecture](#codefresh-gitops-runtime-architecture). + + +{::nomarkdown} +
            +{:/} + +### Codefresh GitOps Clients + +Codefresh Clients include the Codefresh UI and the Codefresh CLI. +The Codefresh UI provides a unified, enterprise-wide view of deployments (runtimes and clusters), and CI/CD operations (Delivery Pipelines, workflows, and deployments) in the same location. +The Codefresh CLI includes commands to install hybrid runtimes, add external clusters, and manage runtimes and clusters. + +### Codefresh GitOps Runtime architecture +The sections that follow show detailed views of the GitOps Runtime architecture for the different installation options, and descriptions of the GitOps Runtime components. + +* [Hosted GitOps runtime architecture](#hosted-gitops-runtime-architecture) + For Hosted GitOps, the GitOps Runtime is installed on a _Codefresh-managed cluster_ in the Codefresh platform. +* Hybrid GitOps runtime architecture: + For Hybrid GitOps, the GitOps Runtime is installed on a _customer-managed cluster_ in the customer environment. The Hybrid GitOps Runtime can be tunnel- or ingress-based: + * [Tunnel-based](#tunnel-based-hybrid-gitops-runtime-architecture) + * [Ingress-based](#ingress-based-hybrid-gitops-runtime-architecture) +* GitOps Runtime components + * [Application Proxy](#application-proxy) + * [Argo Project](#argo-project) + * [Request Routing Service](#request-routing-service) + * [Tunnel Server](#tunnel-server) + * [Tunnel Client](#tunnel-client) + + +#### Hosted GitOps runtime architecture +In the hosted environment, the Codefresh Runtime is installed on a K8s cluster managed by Codefresh. + +{% include + image.html + lightbox="true" + file="/images/getting-started/architecture/arch-hosted.png" + url="/images/getting-started/architecture/arch-hosted.png" + alt="Hosted runtime architecture" + caption="Hosted runtime architecture" + max-width="100%" +%} + +#### Tunnel-based Hybrid GitOps runtime architecture +Tunnel-based Hybrid GitOps runtimes use tunneling instead of ingress controllers to control communication between the GitOps Runtime in the customer cluster and the Codefresh GitOps Platform. Tunnel-based runtimes are optimal when the cluster with the GitOps Runtime is not exposed to the internet. + +{% include + image.html + lightbox="true" + file="/images/getting-started/architecture/arch-hybrid-ingressless.png" + url="/images/getting-started/architecture/arch-hybrid-ingressless.png" + alt="Tunnel-based hybrid runtime architecture" + caption="Tunnel-based hybrid runtime architecture" + max-width="100%" +%} + + +#### Ingress-based Hybrid GitOps runtime architecture +Ingress-based runtimes use ingress controllers to control communication between the GitOps Runtime in the customer cluster and the Codefresh GitOps Platform. Ingress-based runtimes are optimal when the cluster with the GitOps Runtime is exposed to the internet. + + + +{% include + image.html + lightbox="true" + file="/images/getting-started/architecture/arch-hybrid-ingress.png" + url="/images/getting-started/architecture/arch-hybrid-ingress.png" + alt="Ingress-based hybrid runtime architecture" + caption="Ingress-based hybrid runtime architecture" + max-width="100%" +%} + + +#### Application Proxy +The GitOps Application Proxy (App-Proxy) functions as the Codefresh agent, and is deployed as a service in the GitOps Runtime. + +For tunnel-based Hybrid GitOps Runtimes, the Tunnel Client forwards the incoming traffic from the Tunnel Server using the Request Routing Service to the GitOps App-Proxy. 
+For Hybrid GitOps Runtimes with ingress, the App-Proxy is the single point-of-contact between the GitOps Runtime and the GitOps Clients, the GitOps Platform, and any organizational systems in the customer environment. + + +The GitOps App-Proxy: +* Accepts and serves requests from GitOps Clients either via the UI or CLI +* Retrieves a list of Git repositories for visualization in the Client interfaces +* Retrieves permissions from the GitOps Control Plane to authenticate and authorize users for the required operations +* Implements commits for GitOps-controlled entities, such as Delivery Pipelines and other CI resources +* Implements state-change operations for non-GitOps controlled entities, such as terminating Argo Workflows + +{::nomarkdown} +
            +{:/} + +#### Argo Project + +The Argo Project includes: +* Argo CD for declarative continuous deployment +* Argo Rollouts for progressive delivery +* Argo Workflows as the workflow engine +* Argo Events for event-driven workflow automation framework + + +{::nomarkdown} +

            +{:/} + +#### Request Routing Service +The Request Routing Service is installed on the same cluster as the GitOps Runtime in the customer environment. +It receives requests from the the Tunnel Client (tunnel-based) or the ingress controller (ingress-based), and forwards the request URLs to the Application Proxy, and webhooks directly to the Event Sources. + +>Important: + The Request Routing Service is available from runtime version 0.0.543 and higher. + Older runtime versions are not affected as there is complete backward compatibility, and the ingress controller continues to route incoming requests. + +#### Tunnel Server +Applies only to _tunnel-based_ Hybrid GitOps Runtimes. +The Codefresh Tunnel Server is installed in the Codefresh platform. It communicates with the enterprise cluster located behind a NAT or firewall. + +The Tunnel Server: +* Forwards traffic from Codefresh Clients to the client (customer) cluster. +* Manages the lifecycle of the Tunnel Client. +* Authenticates requests from the Tunnel Client to open tunneling connections. + +{::nomarkdown} +
            +{:/} + +#### Tunnel Client +Applies only to _tunnel-based_ Hybrid GitOps Runtimes. + +Installed on the same cluster as the Hybrid GitOps Runtime, the Tunnel Client establishes the tunneling connection to the Tunnel Server via the WebSocket Secure (WSS) protocol. +A single Hybrid GitOps Runtime can have a single Tunnel Client. + +The Tunnel Client: +* Initiates the connection with the Tunnel Server. +* Forwards the incoming traffic from the Tunnel Server through the Request Routing Service to App-Proxy, and other services. + +{::nomarkdown} +
            +{:/} + + +#### Customer environment +The customer environment that communicates with the GitOps Runtime and the GitOps Platform, generally includes: +* Ingress controller for ingress hybrid runtimes + The ingress controller is configured on the same Kubernetes cluster as the GitOps Runtime, and implements the ingress traffic rules for the GitOps Runtime. + See [Ingress controller requirements]({{site.baseurl}}/docs/installation/requirements/#ingress-controller). +* Managed clusters + Managed clusters are external clusters registered to provisioned Hosted or Hybrid GitOps runtimes for application deployment. + Hosted GitOps requires you to connect at least one external K8s cluster as part of setting up the Hosted GitOps environment. + Hybrid GitOps allow you to add external clusters after provisioning the runtimes. + See [Add external clusters to runtimes]({{site.baseurl}}/docs/installation/managed-cluster/). +* Organizational systems + Organizational Systems include the customer's tracking, monitoring, notification, container registries, Git providers, and other systems. They can be entirely on-premises or in the public cloud. + Either the ingress controller (ingress hybrid environments), or the Tunnel Client (tunnel-based hybrid environments), forwards incoming events to the Codefresh Application Proxy. + + ## Related articles +[Codefresh pricing](https://codefresh.io/pricing/) +[Codefresh features](https://codefresh.io/features/) + \ No newline at end of file diff --git a/_docs/installation/upgrade-gitops-cli.md b/_docs/installation/upgrade-gitops-cli.md new file mode 100644 index 00000000..30e06096 --- /dev/null +++ b/_docs/installation/upgrade-gitops-cli.md @@ -0,0 +1,87 @@ +--- +title: "Download/upgrade Codefresh CLI" +description: "Have the latest version of the Codefresh CLI for GitOps runtimes" +group: installation +toc: true +--- + +You need the Codefresh CLI to install Hybrid GitOps Runtimes, and have access to all the newest features. +For the initial download, you need to generate an API key and create the API authentication context, which you do from the UI. +When newer versions are available, the CLI automatically notifies you through a banner. You can use the existing API credentials for the upgrade. + + +## GitOps CLI installation modes +The table lists the modes available to install the Codefresh CLI. + +{: .table .table-bordered .table-hover} +| Install mode | OS | Commands | +| -------------- | ----------| ----------| +| `curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| +| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | +| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | +| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| +| `brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`|```` + +## Install the GitOps CLI +Install the Codefresh CLI using the option that best suits you: `curl`, `brew`, or standard download. 
+If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation. + +1. Do one of the following: + * For first-time installation, go to the Welcome page, select **+ Install Runtime**. + * If you have provisioned a GitOps Runtime, in the Codefresh UI, go to [GitOps Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtime**. +1. Install the Codefresh CLI: + * Select one of the installation modes. + * Generate the API key. + * Create the authentication context: + `cf config create-context codefresh --api-key <API-key>` + + + {% include + image.html + lightbox="true" + file="/images/getting-started/quick-start/quick-start-download-cli.png" + url="/images/getting-started/quick-start/quick-start-download-cli.png" + alt="Download CLI to install runtime" + caption="Download CLI to install runtime" + max-width="30%" + %} + + +{::nomarkdown} +

            +{:/} + + +## Upgrade the GitOps CLI + +The Codefresh CLI automatically self-checks its version, and if a newer version is available, prints a banner with the notification. + + {% include + image.html + lightbox="true" + file="/images/runtime/cli-upgrade-banner.png" + url="/images/runtime/cli-upgrade-banner.png" + alt="Upgrade banner for Codefresh CLI" + caption="Upgrade banner for Codefresh CLI" + max-width="40%" + %} + + +You can upgrade to a specific version if you so require, or download the latest version to an output folder to upgrade at your convenience. + + +* Do any of the following: + * To upgrade to the latest version, run: + `cf upgrade` + * To upgrade to a specific version, even an older version, run: + `cf upgrade --version v` + where: + `` is the version you want to upgrade to. + * To download the latest version to an output file, run: + `cf upgrade --version v -o ` + where: + * `` is the path to the destination file, for example, `/cli-download`. + +## Related articles +[Hosted GitOps Runtime setup]({{site.baseurl}}/docs/installation/hosted-runtime) +[Hybrid GitOps Runtime installation]({{site.baseurl}}/docs/installation/hybrid-gitops) diff --git a/_docs/integrations/ci-integrations.md b/_docs/integrations/ci-integrations.md deleted file mode 100644 index f43ad8e4..00000000 --- a/_docs/integrations/ci-integrations.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: "CI integrations" -description: "" -group: integrations -toc: true ---- - -Use Codefresh's Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. - -You can connect a third-party CI solution to Codefresh, such as GitHub Actions for example, to take care of common CI tasks such as building/testing/scanning source code, and have Codefresh Hosted GitOps still responsible for the deployment, including image enrichment and reporting. -The integration brings in all the CI information to your images which you can see in the Images dashboard. - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -### Codefresh image reporting and enrichment action -To support the integration between Codefresh and third-party CI platforms and tools, we have created dedicated actions for supported CI tools in the Codefresh Marketplace. These actions combine image enrichment and reporting through integrations with issue tracking and container registry tools. - ->You can also configure the integration directly in the Codefresh UI, as described in [Connect a third-party CI platform/tool to Codefresh](#connect-a-third-party-ci-platformtool-to-codefresh). - - -Use the action as follows: - -1. Create your pipeline with your CI platform/tool as you usually do. -1. Use existing CI actions for compiling code, running unit tests, security scanning etc. -1. Place the final action in the pipeline as the "report image" action provided by Codefresh. - See: - [GitHub Action Codefresh report image](https://github.com/marketplace/actions/codefresh-report-image){:target="\_blank"} - [Codefresh Classic Codefresh report image](https://codefresh.io/steps/step/codefresh-report-image){:target="\_blank"} -1. When the pipeline completes execution, Codefresh retrieves the information on the image that was built and its metadata through the integration names specified (essentially the same data that Codefresh CI would send automatically). -1. 
View the image in Codefresh's [Images dashboard]({{site.baseurl}}/docs/deployment/images/), and in any [application]({{site.baseurl}}/docs/deployment/applications-dashboard/) in which it is used. - -### Connect a third-party CI platform/tool to Codefresh -Connecting the CI platform/tool to Codefresh from the UI includes configuring the required arguments, and then generating and copying the YAML manifest for the report image to your pipeline. - -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. -1. Filter by **CI tools**, then select the CI tool and click **Add**. -1. Define the arguments for the CI tool: - [Codefresh Classic]({{site.baseurl}}/docs/integrations/ci-integrations/codefresh-classic/) - [GitHub Action]({{site.baseurl}}/docs/integrations/ci-integrations/github-actions/) - [Jenkins]({{site.baseurl}}/docs/integrations/ci-integrations/jenkins/) - - For the complete list of arguments you can use, see [CI integration argument reference](#ci-integration-argument-reference) in this article. - -1. To generate a YAML snippet with the arguments, on the top-right, click **Generate Manifest**. - Codefresh validates the generated manifest, and alerts you to undefined arguments that are required, and other errors. - - {% include image.html -lightbox="true" -file="/images/integrations/generated-manifest-with-error.png" -url="/images/integrations/generated-manifest-with-error.png" -alt="Example of manifest generated for Codefresh Classic with validation errors" -caption="Example of manifest generated for Codefresh Classic with validation errors" -max-width="50%" -%} - -{:start="5"} -1. If required, click **Close**, update as needed and generate the manifest again. -1. If there are no validation errors, click **Copy**. - -{% include image.html -lightbox="true" -file="/images/integrations/classic/classic-manifest.png" -url="/images/integrationsclassic/classic-manifest.png" -alt="Example of manifest generated for Codefresh Classic" -caption="Example of manifest generated for Codefresh Classic" -max-width="50%" -%} - -{:start="6"} -1. Paste the copied manifest as the last step in your CI pipeline. - -### CI integration argument reference -The table describes _all_ the arguments required for CI integrations in general. The actual arguments required, differs according to the CI integration tool. - -{: .table .table-bordered .table-hover} -| Argument | Description | Required/Optional/Default | -| ---------- | -------- | ------------------------- | -| `CF_HOST` | _Deprecated from v 0.0.460 and higher._ Recommend using `CF_RUNTIME_NAME` instead. {::nomarkdown}
            CF_HOST has been deprecated because the URL is not static, and any change can fail the enrichment.

            The URL to the cluster with the Codefresh runtime to integrate with. If you have more than one runtime, select the runtime from the list. Codefresh displays the URL of the selected runtime cluster.{:/} | _Deprecated_ | -| `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | -| `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | -| `CF_API_KEY` | The API key for authentication. Generate the key for the integration. | Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. See [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). | Optional | -| `CF_JIRA_INTEGRATION` | _Deprecated from version 0.0.565 and higher._ Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. | _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use for image enrichment. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | -| `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | -| `CF_WORKFLOW_NAME` | The name assigned to the workflow that builds the image. When defined, the name is displayed in the Codefresh platform. Example, `Staging step` | Optional | -| `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. | Required | -| `CF_GIT_REPO` | The Git repository with the configuration and code used to build the image. {::nomarkdown}
            • Optional for GitHub Actions.
            • Required for Classic and Jenkins.
              • {:/} | Required | -| `CF_GIT_PROVIDER` | The Git provider for the integration, and can be either `github`, `gitlab`, or `bitbucket`. {::nomarkdown}
                • Optional when you don't define other related Git provider arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                • Required when you define at least one of the Git provider arguments. For example, when you define CF_GITLAB_TOKEN, then you must define all Git provider arguments, in this case, CF_GIT_PROVIDER as gitlab, and CF_GITLAB_HOST_URL.
                  • {:/}| Optional | -| `CF_GITLAB_TOKEN` | The token to authenticate the GitLab account. {::nomarkdown}
                    • Optional when you don't define any GitLab-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                    • Required when you define at least one of the GitLab-specific arguments, such as CF_GIT_PROVIDER as gitlab, or CF_GITLAB_HOST_URL.
                      • {:/} | Optional | -| `CF_GITLAB_HOST_URL` | The URL address of your GitLab Cloud/Server instance. {::nomarkdown}
                        • Optional when you don't define other related GitLab-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                        • Required when you define at least one of the GitLab-specific arguments, such as CF_GIT_PROVIDER as gitlab, or CF_GITLAB_TOKEN.
                          • {:/} | Optional | -| `CF_BITBUCKET_USERNAME` | The username for the Bitbucket or the Bitbucket Server (on-prem) account. {::nomarkdown}
                            • Optional when you don't define other related Bitbucket-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                            • Required when you define at least one of the Bitbucket-specific arguments, such as CF_GIT_PROVIDER as bitbucket, CF_BITBUCKET_PASSWORD or CF_BITBUCKET_HOST_URL.
                              • {:/}| Optional | -| `CF_BITBUCKET_PASSWORD` | The password for the Bitbucket or the BitBucket Server (on-prem) account. {::nomarkdown}
                                • Optional when you don't define other related Bitbucket-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                • Required when you define at least one of the Bitbucket-specific arguments, such as CF_GIT_PROVIDER as bitbucket, CF_BITBUCKET_USERNAME, or CF_BITBUCKET_HOST_URL.
                                  • {:/}| Optional | -| `CF_BITBUCKET_HOST_URL` | Relevant for Bitbucket Server accounts only. The URL address of your Bitbucket Server instance. Example, `https://bitbucket-server:7990`. {::nomarkdown}
                                    • Optional when you don't define other related Bitbucket Server-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                    • Required when you define at least one of the Bitbucket Server-specific arguments, such as CF_GIT_PROVIDER as bitbucket, CF_BITBUCKET_USERNAME or CF_BITBUCKET_PASSWORD.
                                    {:/} | Optional | -|`CF_JIRA_PROJECT_PREFIX` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira project prefix that identifies the ticket number to use.| Required| -| `CF_JIRA_MESSAGE` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira issue IDs matching the string to associate with the image. | Required | -| `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | - -### Related articles -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue tracking intergrations]({{site.baseurl}}/docs/integrations/issue-tracking/) - - - - - - diff --git a/_docs/integrations/ci-integrations/codefresh-classic.md b/_docs/integrations/ci-integrations/codefresh-classic.md index e15ab68e..ce01417e 100644 --- a/_docs/integrations/ci-integrations/codefresh-classic.md +++ b/_docs/integrations/ci-integrations/codefresh-classic.md @@ -41,7 +41,7 @@ reportImage: CF_HOST: '[runtime-host-url]' # Codefresh API key !! Committing a plain text token is a security risk. We highly recommend using encrypted secrets !! - # Documentation - https://codefresh.io/docs/docs/configure-ci-cd-pipeline/secrets-store/ + # Documentation - https://codefresh.io/docs/docs/pipelines/secrets-store/ CF_API_KEY: ${{API_KEY}} # Image path to enrich @@ -102,7 +102,7 @@ For how-to instructions, see [Connect a third-party CI platform/tool to Codefres Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the Codefresh Classic integration pipeline is triggered. You can templatize the values of these arguments to ensure that the required information is included in the reported image. -Codefresh Classic offers [system variables](https://codefresh.io/docs/docs/codefresh-yaml/variables/#system-provided-variables) you can use to templatize argument values. +Codefresh Classic offers [system variables](https://codefresh.io/docs/docs/pipelines/variables/#system-provided-variables) you can use to templatize argument values. {::nomarkdown}
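To make the templatization note in the hunk above concrete, here is a minimal, illustrative sketch of the values a Codefresh Classic `codefresh-report-image` step might carry once system variables are substituted in. The layout mirrors the truncated `reportImage` step shown in the hunk; the runtime name `codefresh-hosted` and the registry integration name `my-registry` are placeholders, the system variables (`CF_REPO_OWNER`, `CF_REPO_NAME`, `CF_SHORT_REVISION`, `CF_BRANCH`) come from the variables page linked in the hunk, and the exact step schema should be verified against the Codefresh Classic report-image step referenced earlier before copying.

{% highlight yaml %}
{% raw %}
reportImage:
  # ...rest of the step definition as generated from the Codefresh UI...
  CF_RUNTIME_NAME: 'codefresh-hosted'               # placeholder runtime name
  CF_API_KEY: ${{API_KEY}}                          # keep the key in an encrypted variable, as noted above
  CF_CONTAINER_REGISTRY_INTEGRATION: 'my-registry'  # placeholder container registry integration
  # Codefresh Classic system variables templatize the image and branch values:
  CF_IMAGE: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}:${{CF_SHORT_REVISION}}'
  CF_GIT_BRANCH: '${{CF_BRANCH}}'
{% endraw %}
{% endhighlight %}

Because the image and branch are built from system variables, the reported image always tracks the build that actually produced it, with no per-pipeline hard-coding.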
                                    diff --git a/_docs/integrations/ci-integrations/github-actions.md b/_docs/integrations/ci-integrations/github-actions.md deleted file mode 100644 index fa6dd1c8..00000000 --- a/_docs/integrations/ci-integrations/github-actions.md +++ /dev/null @@ -1,258 +0,0 @@ ---- -title: "GitHub Actions" -description: "" -group: integrations -sub_group: ci-integrations -toc: true ---- - -Use Codefresh Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. -GitHub Actions is one of the third-party CI solutions that you can connect to Codefresh for deployment with image reporting and enrichment. - - Connecting a GitHub Action, adds the CI information to images which are displayed in the Images dashboard, as in the example below. - - {% include - image.html - lightbox="true" - file="/images/integrations/images-dashboard.png" - url="/images/integrations/images-dashboard.png" - alt="Images dashboard with enriched image information" - caption="Images dashboard with enriched image information" - max-width="70%" - %} - -For information on how to use the image reporting action in your GitHub Action pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). - - -### Example of GitHub Actions pipeline with Codefresh report image action - - -Here is an example pipeline that uses GitHub Actions to build a container image, and the Codefresh action to enrich and report the resulting image to Codefresh. - -Because a Jira integration account is configured in Codefresh, the step needs only the name for `CF_JIRA_INTEGRATION`, instead of explicit credentials `CF_JIRA_API_TOKEN`, `CF_JIRA_HOST_URL`, and `CF_JIRA_EMAIL`. - - -{% highlight yaml %} -{% raw %} - -name: Docker Image CI - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] -jobs: - build: - environment: - name: test - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v3 - - name: Login to DockerHub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build & push the Docker image - env: - CF_IMAGE: ${{ secrets.DOCKERHUB_USERNAME }}/build-by-github-action:0.0.1 - run: | - docker build . --file Dockerfile --tag $CF_IMAGE && docker push $CF_IMAGE - echo "Image should be accessible to your local machine (after docker login) by:" - echo "docker pull $CF_IMAGE" - docker pull $CF_IMAGE - echo "On the next step, the report image would use the integration to pull information on the reported image, using the specified enrichers." - - name: report image by action - with: - # Name of runtime to implement the enrichment - CF_RUNTIME_NAME: 'codefresh-hosted' - - # Codefresh API key !! Committing a plain text token is a security risk. We highly recommend using encrypted secrets. !! - # Documentation - https://docs.github.com/en/actions/security-guides/encrypted-secrets - CF_API_KEY: ${{ secrets.USER_TOKEN }} - - # Name of Container registry integration - CF_CONTAINER_REGISTRY_INTEGRATION: 'docker' - - # The git branch which is related for the commit - CF_GIT_BRANCH: 'main' - - # Image path to enrich - CF_IMAGE: ${{ secrets.DOCKERHUB_USERNAME }}/build-by-github-action:0.0.1 - - # GitHub Access token !! Committing a plain text token is a security risk. We highly recommend using encrypted secrets. !! 
- # Documentation - https://docs.github.com/en/actions/security-guides/encrypted-secrets - CF_GITHUB_TOKEN: ${{ secrets.CF_GITHUB_TOKEN }} - - # Name of Jira integration - CF_ISSUE_TRACKING_INTEGRATION: 'jira' - - # String starting with the issue ID to associate with image - CF_JIRA_MESSAGE: 'CR-11027' - - # Jira project filter - CF_JIRA_PROJECT_PREFIX: "CR" - uses: codefresh-io/codefresh-report-image@latest - - -{% endraw %} -{% endhighlight yaml %} - -### GitHub Action-Codefresh integration arguments -The table describes the arguments required to connect a GitHub Action to Codefresh. - - - {: .table .table-bordered .table-hover} -| Argument | Description | Required/Optional/Default | -| ---------- | -------- | ------------------------- | -| `CF_HOST` | _Deprecated from v 0.0.460 and higher._ Recommend using `CF_RUNTIME_NAME` instead. {::nomarkdown}
                                    CF_HOST has been deprecated because the URL is not static, and any change can fail the enrichment.

                                    The URL to the cluster with the Codefresh runtime to integrate with. If you have more than one runtime, select the runtime from the list. Codefresh displays the URL of the selected runtime cluster.{:/} | Required | -| `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | -| `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | -| `CF_API_KEY` | The API key to authenticate the GitHub Actions user to Codefresh. Generate the key for the GitHub Action. {::nomarkdown}
                                    Enter this token in GitHub Actions as a secret with the name CF_API_KEY. You can then reference it in all GitHub pipelines as you would any other secret.{:/}| Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. {::nomarkdown}
                                    • For a GitHub Container registry, select GHCR_GITHUB_TOKEN_AUTHENTICATION even if you have not created an integration in Codefresh.
                                      Codefresh retrieves and provides the explicit credentials for the container registry on generating the integration manifest.
                                    • To create a container registry integration if you don't have one, click Create Container Registry Integration, and then configure the settings.
                                      See Container registry integrations.
                                    {:/} | Optional | -| `CF_GIT_REPO` | The Git repository with the configuration and code used to build the image. If not defined, Codefresh retrieves it from the repo defined for the GitHub Action. | Required | -| `CF_JIRA_INTEGRATION` | Deprecated from version 0.0.565. Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. | _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | -| `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | -| `CF_WORKFLOW_NAME` | The name assigned to the workflow that builds the image. When defined, the name is displayed in the Codefresh platform. Example, `Staging step` | Optional | -| `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. | Required | -| `CF_GITHUB_TOKEN` | The GitHub authentication token. See [Git tokens]({{site.baseurl}}/docs/reference/git-tokens/#git-personal-tokens). | Required | -|`CF_JIRA_PROJECT_PREFIX` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira project prefix that identifies the ticket number to use.| Required| -| `CF_JIRA_MESSAGE` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira issue IDs matching the string to associate with the image. | Required | -| `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | - - -For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/integrations/ci-integrations/#connect-a-third-party-ci-platformtool-to-codefresh). -### Templatization examples for CF arguments - -Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the GitHub Actions pipeline is triggered. You can templatize the values of these arguments to ensure that the required information is included in the reported image. - -See GitHub Actions [environment variables](https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables) you can use to templatize argument values. - -{::nomarkdown} -
                                    -{:/} - -#### CF_IMAGE - -**Example: Report full repo and branch information** -This example illustrates how to define the value for `CF_IMAGE` to report the repo owner, name, and short branch, with the Git hash. - - Value: - {% raw %}`${{ github.repository }}/${{ github.ref_name }}/${{ github.sha }}`{% endraw %} - - where: - * {% raw %}`${{ github.repository }}`{% endraw %} reports the owner of the repository and the name of the repository. For example, `nr-codefresh/codefresh-production`. - * {% raw %}`${{ github.ref_name }}`{% endraw %} reports the short reference to the branch that triggered the workflow. For example, `auth-feature-branch`. - * {% raw %}`${{ github.sha }}`{% endraw %} reports the complete commit SHA that triggered the workflow. For example, `fa53bfa91df14c4c9f46e628a65ee21dd574490a`. - - - -**Example: Report a specific image tag** -This example illustrates how to define the value for `CF_IMAGE` when you know the specific image version you want to report. - -Value: -{% raw %}`${{ github.repository }}:`{% endraw %} - -where: -* {% raw %}`${{ github.repository }}`{% endraw %} reports the owner of the repository and the name of the repository. For example, `nr-codefresh/codefresh-production`. -* `` reports the hard-coded tag `v1.0`. - - -**Example: Report the latest Git tag available on repository** -This example illustrates how to define the value for `CF_IMAGE` to report the latest Git tag on the repository. - -Value: -{% raw %}`codefresh/${{ github.repository }}/latest`{% endraw %} - -where: -* {% raw %}`codefresh`{% endraw %} is the hard-coded owner of the image. -* {% raw %}`${{ github.repository }}`{% endraw %} reports the owner of the repository and the name of the repository. For example, `nr-codefresh/codefresh-production`. -* {% raw %}`latest`{% endraw %} reports the latest Git tag available for the repository defined by {% raw %}`${{ github.repository }}`{% endraw %}. For example, `v1.0.4-14-g2414721`. - -{::nomarkdown} -
                                    -{:/} - -#### CF_GIT_BRANCH - -**Example: Report fully-formed reference of the branch or tag** -This example illustrates how to define the value for `CF_GIT_BRANCH` to report the fully-formed reference of the branch or tag that triggered the workflow run. -For workflows triggered by push events, this is the branch or tag ref that was pushed. -For workflows triggered by pull_requests, this is the pull request merge branch. - -Value: -{% raw %}`${{ github.ref }}`{% endraw %} - -where: -* {% raw %}`${{ github.ref }}`{% endraw %} is the reference to the branch or tag. For example, `refs/heads/auth-feature-branch` (branch), and `refs/pull/#843/merge` (pull request). - -**Example: Report short reference name of the branch or tag** -This example illustrates how to define the value for `CF_GIT_BRANCH` to report only the name of the branch or tag that triggered the workflow run. - - -Value: -{% raw %}`${{ github.ref_name }}`{% endraw %} - -where: -* {% raw %}`${{ github.ref_name }}`{% endraw %} is the name of the target branch or tag. For example, `auth-feature-branch`. - -{::nomarkdown} -
                                    -{:/} - -#### CF_JIRA_MESSAGE -The Jira message represents an existing Jira issue, and must be a literal string. - - Value: - `CR-1246` - -### GitHub Action logs -View and analyze logs for GitHub Action workflows through the Logs tab. When a GitHub Action is run, it is added to the Logs tab. -You can: -* Filter by status or by date range to view a subset of actions -* Navigate to the build file in GitHub Actions, and view the Codefresh report image step - -{% include image.html -lightbox="true" -file="/images/integrations/github-actions/github-actions-logs.png" -url="/images/integrations/github-actions/github-actions-logs.png" -alt="GitHub Action: Logs tab" -caption="GitHub Action: Logs tab" -max-width="50%" -%} - -**Build YAML in GitHub Action** - -The Run column includes the link to the build files for the actions. - -Here are examples of the build file for the GitHub Action (top) and of the Codefresh report image step in the action (down). - -{% include image.html -lightbox="true" -file="/images/integrations/github-actions/action-build-yaml.png" -url="/images/integrations/github-actions/action-build-yaml.png" -alt="Build file in GitHub Action" -caption="Build file in GitHub Action" -max-width="50%" -%} - -{% include image.html -lightbox="true" -file="/images/integrations/github-actions/actiosn-cf-report-image-step.png" -url="/images/integrations/github-actions/actiosn-cf-report-image-step.png" -alt="Codefresh report image step in GitHub Action build file" -caption="Codefresh report image step in GitHub Action build file" -max-width="50%" -%} - - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) - - diff --git a/_docs/integrations/ci-integrations/jenkins.md b/_docs/integrations/ci-integrations/jenkins.md deleted file mode 100644 index ddfa1eba..00000000 --- a/_docs/integrations/ci-integrations/jenkins.md +++ /dev/null @@ -1,250 +0,0 @@ ---- -title: "Jenkins" -description: "" -group: integrations -sub_group: ci-integrations -toc: true ---- - - Use Hosted GitOps with any popular Continuous Integration (CI) solution, not just with Codefresh CI. Jenkins is one of the third-party CI platform/tools that you can connect to Codefresh for deployment with image enrichment and reporting. - - Connecting a Jenkins pipeline, adds the CI information to images which are displayed in the Images dashboard, as in the example below. - - {% include - image.html - lightbox="true" - file="/images/integrations/images-dashboard.png" - url="/images/integrations/images-dashboard.png" - alt="Images dashboard with enriched image information" - caption="Images dashboard with enriched image information" - max-width="70%" - %} - - -For information on how to use the image reporting action in your Jenkins pipeline and how to configure the integration, see [CI Integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). 
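If you want the mechanics at a glance before the full pipeline example in the next section, the sketch below shows what the report step boils down to: export the `CF_*` values and pass them to the Codefresh report-image container, exactly as the `sh` block in that example does. This is an illustrative sketch, not the official step definition: the runtime name `codefresh-hosted` and the registry integration name `docker` are taken from the example that follows, while the image path, repository, and branch values are placeholders to replace with your own. Read `CF_API_KEY` from the Jenkins credentials store rather than hard-coding it.

{% highlight shell %}
# Minimal sketch: export the required CF_* values for the report-image container.
export CF_RUNTIME_NAME="codefresh-hosted"           # runtime that performs the enrichment
export CF_API_KEY="****"                            # inject from Jenkins credentials; never commit in plain text
export CF_CONTAINER_REGISTRY_INTEGRATION="docker"   # container registry integration name in Codefresh
export CF_IMAGE="my-account/my-image:1.0.0"         # placeholder: image to enrich and report
export CF_GIT_BRANCH="main"                         # placeholder: branch with the commit/PR data
export CF_GIT_REPO="my-org/my-repo"                 # placeholder: repository that built the image

# Write the CF_* variable names to an env file and let docker resolve their values
# from the environment; this is the same trick the pipeline example below uses.
env | cut -f 1 -d "=" | grep -E "^CF_" > cf_env
docker run --env-file=cf_env "quay.io/codefresh/codefresh-report-image:latest"
{% endhighlight %}

Keeping the container invocation identical across CI tools, and varying only how the `CF_*` values are exported, is what makes the same report-image step reusable from Jenkins, GitHub Actions, or Codefresh Classic.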
- -### Example of Jenkins pipeline with report image step - -{% highlight yaml %} -{% raw %} - -pipeline { - - agent any - stages { - stage('Clone repository') { - steps { - checkout scm - } - } - stage ('Build & Push ') { - environment { - CF_IMAGE= credentials('CF_IMAGE') - } - steps { - sh 'echo "Building $CF_IMAGE"' - script { - def app - app = docker.build("${env.CF_IMAGE}") - // require credentials to be stored under DOCKERHUB - docker.withRegistry('https://registry.hub.docker.com', 'DOCKERHUB') { - app.push("latest") - } - } - sh ''' - # test we have image in repository. - docker pull $CF_IMAGE - ''' - } - } - - stage('report image') { - environment { - // Name of runtime to implement the enrichment - CF_RUNTIME_NAME= 'codefresh-hosted' - - // Image path to enrich - CF_IMAGE= credentials('CF_IMAGE') - - // Codefresh API key !! Committing a plain text token is a security risk. We highly recommend using encrypted secrets. !! - // Documentation - https://www.jenkins.io/doc/book/using/using-credentials - CF_API_KEY= credentials('CF_API_KEY') - - // Name of Container registry integration - CF_CONTAINER_REGISTRY_INTEGRATION= 'docker' - - // Name of Jira integration - CF_ISSUE_TRACKING_INTEGRATION= 'jira' - - // String starting with the issue ID to associate with image - CF_JIRA_MESSAGE= 'CR-11027' - - // Jira project filter - CF_JIRA_PROJECT_PREFIX= 'CR' - - // GitHub Access token !! Committing a plain text token is a security risk. We highly recommend using encrypted secrets. !! - // Documentation - https://www.jenkins.io/doc/book/using/using-credentials - CF_GITHUB_TOKEN= credentials('CF_GITHUB_TOKEN') - } - steps { - sh ''' - export CF_CI_TYPE="jenkins" - # add workflow details - export CF_WORKFLOW_NAME="${CF_WORKFLOW_NAME:-$JOB_NAME}" - export CF_WORKFLOW_URL="${CF_WORKFLOW_URL:-$BUILD_URL}" - # add git branch - export CF_GIT_PROVIDER="${CF_GIT_PROVIDER:-github}" - WITHOUT_POSTFIX="${GIT_URL%.*}" - export CF_GIT_REPO="${CF_GIT_REPO:-${WITHOUT_POSTFIX#*//*/}}" - # slice branch name from repo/branch - export CF_GIT_BRANCH="${CF_GIT_BRANCH:-${GIT_BRANCH#*/}}" - env | cut -f 1 -d "=" | grep -E "^CF_" > cf_env - docker run --env-file=cf_env "quay.io/codefresh/codefresh-report-image:latest" - ''' - } - } - - } -} - -{% endraw %} -{% endhighlight yaml %} - -### Jenkins-Codefresh integration arguments -The table describes the arguments to connect Codefresh Classic to Codefresh. - -{: .table .table-bordered .table-hover} -| Argument | Description | Required/Optional/Default | -| ---------- | -------- | ------------------------- | -| `CF_RUNTIME_NAME` | The runtime to use for the integration. If you have more than one runtime, select the runtime from the list. | Required | -| `CF_PLATFORM_URL` | The root URL of the Codefresh application. The default value is `https://g.codefresh.io`. | Optional | -| `CF_API_KEY` | The API key to authenticate the Codefresh Classic user to Codefresh. Generate the key for the integration. | Required | -| `CF_CONTAINER_REGISTRY_INTEGRATION` | The name of the container registry integration created in Codefresh where the image is stored. To create a container registry integration if you don't have one, click **Create Container Registry Integration**, and then configure the settings. See [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). | Optional | -| `CF_JIRA_INTEGRATION` | Deprecated from version 0.0.565. Replaced by `CF_ISSUE_TRACKING_INTEGRATION`. 
| _Deprecated_ -| `CF_ISSUE_TRACKING_INTEGRATION` | The name of the issue tracking integration created in Codefresh to use to enrich the image. Relevant only if Jira enrichment is required for the image. If you don't have a Jira integration, click **Create Atlassian Jira Integration** and configure settings. See [Jira integration]({{site.baseurl}}/docs/integrations/issue-tracking/jira/). | Optional | -| `CF_IMAGE` | The image to be enriched and reported in Codefresh. Pass the `[account-name]/[image-name]:[tag]` built in your CI. | Required | -| `CF_GIT_BRANCH` | The Git branch with the commit and PR (pull request) data to add to the image. Pass the Branch from the event payload used to trigger your action. | Required | -| `CF_GIT_REPO` | The Git repository with the configuration and code used to build the image. | Required | -| `CF_GIT_PROVIDER` | The Git provider for the integration, and can be either `github`, `gitlab`, or `bitbucket`. {::nomarkdown}
                                    • Optional when you don't define other related Git provider arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                    • Required when you define at least one of the Git provider arguments. For example, when you define CF_GITLAB_TOKEN, then you _must_ define all Git provider arguments, in this case, CF_GIT_PROVIDER as gitlab, and CF_GITLAB_HOST_URL.
                                      • {:/}| Optional | -| `CF_GITLAB_TOKEN` | The token to authenticate the GitLab account. {::nomarkdown}
                                        • Optional when you don't define any GitLab-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                        • Required when you define at least one of the GitLab-specific arguments, such as CF_GIT_PROVIDER as gitlab, or CF_GITLAB_HOST_URL.
                                          • {:/} | Optional | -| `CF_GITLAB_HOST_URL` | The URL address of your GitLab Cloud/Server instance. {::nomarkdown}
                                            • Optional when you don't define other related GitLab-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                            • Required if you define at least one of the GitLab-specific arguments, such as CF_GIT_PROVIDER as gitlab, or CF_GITLAB_TOKEN.
                                              • {:/} | Optional | -| `CF_BITBUCKET_USERNAME` | The username for the Bitbucket or the Bitbucket Server (on-prem) account. {::nomarkdown}
                                                • Optional when you don't define other related Bitbucket-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                                • Required when you define at least one of the Bitbucket-specific arguments, such as CF_GIT_PROVIDER as bitbucket, CF_BITBUCKET_PASSWORD or CF_BITBUCKET_HOST_URL.
                                                  • {:/}| Optional | -| `CF_BITBUCKET_PASSWORD` | The password for the Bitbucket or the Bitbucket Server (on-prem) account. {::nomarkdown}
                                                    • Optional when you don't define other related Bitbucket-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                                    • Required when you define at least one of the Bitbucket-specific arguments, such as CF_GIT_PROVIDER as bitbucket, CF_BITBUCKET_USERNAME, or CF_BITBUCKET_HOST_URL.
                                                      • {:/}| Optional | -| `CF_BITBUCKET_HOST_URL` | Relevant for Bitbucket Server accounts only. The URL address of your Bitbucket Server instance. Example, `https://bitbucket-server:7990`. {::nomarkdown}
                                                        • Optional when you don't define other related Bitbucket Server-specific arguments. When not defined, Codefresh retrieves the required information from the runtime selected for the integration.
                                                        • Required when you define at least one of the Bitbucket Server-specific arguments, such as CF_GIT_PROVIDER as bitbucket, CF_BITBUCKET_USERNAME or CF_BITBUCKET_PASSWORD.
                                                        {:/} | Optional | -|`CF_JIRA_PROJECT_PREFIX` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira project prefix that identifies the ticket number to use.| Required| -| `CF_JIRA_MESSAGE` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The Jira issue IDs matching the string to associate with the image. | Required | -| `CF_JIRA_FAIL_ON_NOT_FOUND` | Relevant only when `CF_ISSUE_TRACKING_INTEGRATION` is defined. The report image action when the `CF_JIRA_MESSAGE` is not found. When set to `true`, the report image action is failed. | Required | - - -For how-to instructions, see [Connect a third-party CI platform/tool to Codefresh]({{site.baseurl}}/docs/integrations/ci-integrations/#connect-a-third-party-ci-platform-tool-to-codefresh). - -### Templatization examples for CF arguments - -Arguments such as `CF_IMAGE`, `CF_GIT_BRANCH`, and `CF_JIRA_MESSAGE` are populated dynamically when the Jenkins pipeline is triggered. You can templatize the values of these arguments in the pipeline to ensure that the required information is included in the reported image. - -Jenkins offers a Git plugin with [environment variables](https://plugins.jenkins.io/git/#plugin-content-environment-variables){:target="\_blank"} you can use to templatize argument values. - -{::nomarkdown} -
                                                        -{:/} - -#### CF_IMAGE -**Example: Report repo, branch with Git hash** -This example illustrates how to define the value for `CF_IMAGE` to report Git repo, branch, committer, and Git hash information. - - Value: - {% raw %}`${env.GIT_COMMITTER_NAME}/${env.GIT_URL}/${env.GIT_BRANCH}/${env.GIT_REVISION}`{% endraw %} - - where: - * {% raw %}`${env.GIT_COMMITTER_NAME}`{% endraw %} reports the name of the user who made the commit. For example, `nr-codefresh`. - * {% raw %}`${env.GIT_URL}`{% endraw %} reports the name of the Git repository. For example, `codefresh-production`. - * {% raw %}`${env.GIT_BRANCH}`{% endraw %} reports the name of the Git branch. For example, `pr-2345`, `new-auth-strategy`. - * {% raw %}`${env.GIT_REVISION}`{% endraw %} reports the Git SHA1 commit ID pointing to the commit that was built. For example, `fa53bfa91df14c4c9f46e628a65ee21dd574490a`. - - -**Example: Report a specific image tag** -This example illustrates how to define the value for `CF_IMAGE` when you know the specific image version you want to report. - - Value: - {% raw %}`${env.GIT_COMMITTER_NAME}/${env.GIT_URL}/`{% endraw %} - - where: - * {% raw %}`${env.GIT_COMMITTER_NAME}`{% endraw %} and {% raw %}`${env.GIT_URL}`{% endraw %} report the name of the user who made the commit and the name of the repository. For example, `nr-codefresh` and `codefresh-production`, respectively. - * {% raw %}``{% endraw %} reports the hard-coded tag `v1.0`. - - -**Example: Report the latest Git tag available on repository** -This example illustrates how to define the value for `CF_IMAGE` to report the latest Git tag on the repository. - - Value: - {% raw %}`codefresh/${env.GIT_URL}/latest`{% endraw %} - - where: - * {% raw %}`codefresh`{% endraw %} is the hard-coded owner of the image. - * {% raw %}`${env.GIT_URL}`{% endraw %} reports the name of the repository that triggered the integration. - * {% raw %}`latest`{% endraw %} reports the latest Git tag available for the repository defined by {% raw %}`${env.GIT_URL}`{% endraw %}. For example, `v1.0.4-14-g2414721`. - -{::nomarkdown} -
                                                        -{:/} - -#### CF_GIT_BRANCH - -**Example: Report the fully-formed Git branch** -This example illustrates how to define the value for `CF_GIT_BRANCH` to report the fully-formed Git branch. - - Value: - {% raw %}`${env.GIT_URL}/${env.GIT_BRANCH}`{% endraw %} - - where: - * {% raw %}`${env.GIT_URL}`{% endraw %} is the name of the repository that triggered the pipeline. For example, `codefresh-production`. - * {% raw %}`${env.GIT_BRANCH}`{% endraw %} is the fully-formed name of the Git branch. For example, `origin/auth-feature-branch`. - - -**Example: Report the local Git branch** -This example illustrates how to define the value for `CF_GIT_BRANCH` to report only the branch in the repository that triggered the pipeline. - - Value: - {% raw %}`${env.GIT_URL}/${env.GIT_LOCAL_BRANCH}`{% endraw %} - - where: - * {% raw %}`${env.GIT_URL}`{% endraw %} is the name of the repository that triggered the pipeline. - * {% raw %}`${env.GIT_LOCAL_BRANCH}`{% endraw %} is the name of the Git branch. For example, `auth-feature-branch`. - -{::nomarkdown} -
                                                        -{:/} - -#### CF_JIRA_MESSAGE -The Jira message represents an existing Jira issue, and must be a literal string. - - Value: - `CR-1246` - -### Jenkins integration logs -View and analyze logs for Jenkins through the Logs tab. When a Jenkins pipeline is run, it is added to the Logs tab. -You can: -* Filter by status or by date range to view a subset of actions -* Navigate to the build file in Jenkins, and view the Codefresh report image step. - - -**Build in Jenkins** - -The Run column includes the link to the pipeline in Jenkins. - -Here is an example of the Jenkins log for the pipeline with the report image step. - -{% include image.html -lightbox="true" -file="/images/integrations/jenkins/jenkins-integration-log.png" -url="/images/integrations/jenkins/jenkins-integration-log.png" -alt="Logs for Codefresh report image step in Jenkins build" -caption="Logs for Codefresh report image step in Jenkins build" -max-width="50%" -%} - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) \ No newline at end of file diff --git a/_docs/integrations/container-registries.md b/_docs/integrations/container-registries.md deleted file mode 100644 index 2aef4ac1..00000000 --- a/_docs/integrations/container-registries.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "Container registry integrations" -description: "" -group: integrations -toc: true ---- - -Codefresh can integrate with popular container registries such as Docker Hub, JFrog Artifactory, and more. - -Adding a container registry integration in Codefresh allows you to reference the integration in third-party CI platforms/tools such as GitHub Actions and Codefresh Classic by the name of the registry integration, instead of explicit credentials. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). - -You add a container registry integration in Codefresh by: -* Defining the integration name -* Selecting the runtime or runtimes it is shared with -* Defining the arguments -* Testing the connection -* Committing the changes - -You can add more than one integration for the same registry. Once added, Codefresh displays the list of existing integrations with their sync status. You can edit or delete any registry integration. - - - - -### Configure container registry integrations in Codefresh -Configure the settings for a container registry integration in Codefresh. - -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. -1. Filter by **Container Registry**, select the container registry, and click **Configure**. -1. If you already have integrations, click **Add**. -1. 
Define the arguments for the container registry: - [Amazon ECR]({{site.baseurl}}/docs/integrations/container-registries/amazon-ecr/) - [Docker Hub]({{site.baseurl}}/docs/integrations/container-registries/dockerhub/) - [GitHub Container Registry]({{site.baseurl}}/docs/integrations/container-registries/github-cr/) - [JFrog Artifactory]({{site.baseurl}}/docs/integrations/container-registries/jfrog/) - [Quay]({{site.baseurl}}/docs/integrations/container-registries/quay/) -1. To test the connection to the container registry before committing the changes, click **Test Connection**. -1. To confirm, click **Commit**. - It may take a few moments for the new integration to be synced to the cluster before it appears in the list. - -### Integration resource in shared configuration repo -The integration resource for the container registry is created in the Git repository with the shared configuration, within `resources`. -The exact location depends on whether the integration is shared with all or specific runtimes: -* All runtimes: Created in `resources/all-runtimes-all-clusters/` -* Selected runtimes: Created in `resources/runtimes//` - -### View container registry integrations -Selecting a container registry integration displays the existing integrations for that registry in Codefresh. -The example below shows integrations for JFrog Artifactory. - -{% include image.html -lightbox="true" -file="/images/integrations/jfrog/jfrog-int-list.png" -url="/images/integrations/jfrog/jfrog-int-list.png" -alt="JFrog integrations in Codefresh" -caption="JFrog integrations in Codefresh" -max-width="70%" -%} - -Every container registry integration displays the following information: -* Name of the integration -* Runtime or runtimes it is shared with -* Sync status - -### Edit/delete container registry integrations -If you have existing integrations, you can change the connection details, or delete an integration. ->Deleting an integration deletes the integration resource from the shared configuration Git repo, its secrets, the CI workflows that -use it. - -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. -1. Filter by **Container Registry**, and select the specific container registry integration. -1. In the row with the integration to edit or delete, click the three dots and select **Edit** or **Delete**. -1. To edit, update the **Username** and **Password** fields, and click **Test Connection** to verify the account credentials. -1. To delete, type **DELETE** in the text box as instructed. 
- - {% include - image.html - lightbox="true" - file="/images/integrations/jfrog/delete-jfrog.png" - url="/images/integrations/jfrog/delete-jfrog.png" - alt="Delete container registry integration" - caption="Delete container registry integration" - max-width="50%" - %} - -### Related articles -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Images]({{site.baseurl}}/docs/deployment/images/) -[Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/) -[Add Git sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) diff --git a/_docs/integrations/container-registries/amazon-ecr.md b/_docs/integrations/container-registries/amazon-ecr.md deleted file mode 100644 index 17aee861..00000000 --- a/_docs/integrations/container-registries/amazon-ecr.md +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: "Amazon ECR" -description: "" -group: integrations -sub_group: container-registries -toc: true ---- - -Codefresh has native support for interacting with Amazon ECR (Elastic Container Registry), to push, pull, and deploy images. -For information on adding an Amazon ECR integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). - ->Amazon ECR integration is supported only in hybrid runtimes. - -### Prerequisites -Before you configure settings in Codefresh to integrate Amazon ECR: -* [Create an IAM (Identity and Access Management) role](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html){:target="\_blank"} - -Define the role in trusted relationships with `Effect: Allow` and `Action: sts:AssumeRole` on the EKS cluster. -For example: -```yaml -{ - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::XXXXX:role/eksctl-awscluster-ServiceRole-XXXXXX" - }, - "Action": "sts:AssumeRole", - "Condition": {} - }, -``` -For detailed information, see [How Amazon Elastic Container Registry Works with IAM](https://docs.aws.amazon.com/AmazonECR/latest/userguide/security_iam_service-with-iam.html){:target="\_blank"} and the [AWS security blog](https://aws.amazon.com/blogs/security/how-to-use-trust-policies-with-iam-roles/){:target="\_blank"}. - -### Amazon ECR integration settings in Codefresh -The table describes the arguments required to integrate Amazon ECR in Codefresh. - -{: .table .table-bordered .table-hover} -| Setting | Description | -| ---------- | -------- | -| **Integration name** | A friendly name for the integration. This is the name you will reference in the third-party CI platform/tool. | -| **All Runtimes/Selected Runtimes** | {::nomarkdown} The runtimes in the account with which to share the integration resource.
                                                        The integration resource is created in the Git repository with the shared configuration, within resources. The exact location depends on whether the integration is shared with all or specific runtimes:
                                                        • All runtimes: Created in resources/all-runtimes-all-clusters/
                                                        • Selected runtimes: Created in resources/runtimes//
                                                        You can reference the Docker Hub integration in the CI tool. {:/}| -| **IAM Role** | The name of the IAM role you defined with the specific permissions for authentication to the ECR. | -| **Region** | The geographic region hosting the container registry. Define the region nearest to you.| -| **Test connection** | Click to verify that you can connect to the specified instance before you commit changes. | - - - {% include - image.html - lightbox="true" - file="/images/integrations/aws/aws-int-settings.png" - url="/images/integrations/aws/aws-int-settings.png" - alt="Amazon ECR for image enrichment" - caption="Amazon ECR for image enrichment" - max-width="50%" - %} - -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking]({{site.baseurl}}/docs/integrations/issue-tracking/) \ No newline at end of file diff --git a/_docs/integrations/container-registries/dockerhub.md b/_docs/integrations/container-registries/dockerhub.md deleted file mode 100644 index d74025c9..00000000 --- a/_docs/integrations/container-registries/dockerhub.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: "Docker Hub" -description: "" -group: integrations -sub_group: container-registries -toc: true ---- - -Codefresh has native support for interacting with Docker Hub registries, to push, pull, and deploy images. -For information on adding a Docker Hub integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). - -### Prerequisites -Before you configure settings in Codefresh to integrate Docker Hub registry, do the following: - -* [Create an account or sign in to your account at Docker Hub](https://hub.docker.com/signup){:target="\_blank"} -* (Optional) [Enable 2FA (Two-Factor Authentication)](https://docs.docker.com/docker-hub/2fa/){:target="\_blank"} -* [Create a personal account token](https://docs.docker.com/docker-hub/access-tokens/){:target="\_blank"} - -### Docker Hub integration settings in Codefresh -The table describes the arguments required to integrate Docker Hub to Codefresh. - -{: .table .table-bordered .table-hover} -| Setting | Description | -| ---------- | -------- | -| **Integration name** | A friendly name for the integration. This is the name you will reference in the third-party CI platform/tool. | -| **All Runtimes/Selected Runtimes** | {::nomarkdown} The runtimes in the account with which to share the integration resource.
                                                        The integration resource is created in the Git repository with the shared configuration, within resources. The exact location depends on whether the integration is shared with all or specific runtimes:
                                                        • All runtimes: Created in resources/all-runtimes-all-clusters/
                                                        • Selected runtimes: Created in resources/runtimes//
                                                        You can reference the Docker Hub integration in the CI tool. {:/}| -| **Username** | The Docker Hub username.| -| **Password** | If you enabled two-factor authentication, enter the personal access token for your Docker Hub account for Codefresh to push images. Personal access tokens are more secure and can be revoked when needed. Codefresh can then push your images. If two-factor authentication is not enabled, enter the password of your Docker Hub account (not recommended).| -| **Test connection** | Click to verify that you can connect to the specified instance before you commit changes. | - - - {% include - image.html - lightbox="true" - file="/images/integrations/docker-registries/docker-hub.png" - url="/images/integrations/docker-registries/docker-hub.png" - alt="Docker Hub integration for image enrichment" - caption="Docker Hub integration for image enrichment" - max-width="50%" - %} - -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) - diff --git a/_docs/integrations/container-registries/github-cr.md b/_docs/integrations/container-registries/github-cr.md deleted file mode 100644 index f8aa77a7..00000000 --- a/_docs/integrations/container-registries/github-cr.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -title: "GitHub Container Registry (GHCR)" -description: "" -group: integrations -sub_group: container-registries -toc: true ---- - -The GitHub Container registry allows you to host and manage your Docker container images in your personal or organisation account on GitHub. One of the benefits is that permissions can be defined for the Docker image independent from any repository. Thus, your repository could be private and your Docker image public. -For information on adding a GitHub Container registry integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). - -### Prerequisites -Before you configure settings in Codefresh to integrate GitHub container registry: -* Make sure you have a personal access token with the correct scopes or create one. - You need at least the following scopes: - * `write:packages` - * `read:packages` - * `delete:packages` - * `repo` (if your repository is private; do not select if it is public) - - For detailed information, see the [Authenticating to the Container registry](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#authenticating-to-the-container-registry){:target="\_blank"}. - - -### GitHub Container registry (GHCR) integration settings in Codefresh - -{: .table .table-bordered .table-hover} -| Setting | Description | -| ---------- | -------- | -| **Integration name** | A friendly name for the integration. This is the name you will reference in the third-party CI platform/tool. 
| -| **All Runtimes/Selected Runtimes** | {::nomarkdown} The runtimes in the account with which to share the integration resource.
                                                        The integration resource is created in the Git repository with the shared configuration, within resources. The exact location depends on whether the integration is shared with all or specific runtimes:
                                                        • All runtimes: Created in resources/all-runtimes-all-clusters/
                                                        • Selected runtimes: Created in resources/runtimes//
                                                        {:/}| -| **Domain** | The GitHub registry domain and is set to `ghcr.io`.| -| **Username** | Your GitHub username.| -| **GitHub Token** | Your GitHub PAT (personal access token).| -|**Test Connection** | Click to verify that you can connect to the specified instance before you commit changes. | - - - {% include - image.html - lightbox="true" - file="/images/integrations/githubcr/githubcr-int-settings.png" - url="/images/integrations/githubcr/githubcr-int-settings.png" - alt="GitHub Container registry integration" - caption="GitHub Container registry integration" - max-width="50%" - %} - -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking]({{site.baseurl}}/docs/integrations/issue-tracking/) diff --git a/_docs/integrations/container-registries/jfrog.md b/_docs/integrations/container-registries/jfrog.md deleted file mode 100644 index ccb22dd0..00000000 --- a/_docs/integrations/container-registries/jfrog.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: "JFrog Artifactory" -description: "" -group: integrations -sub_group: container-registries -toc: true ---- - -Codefresh has native support for interacting with JFrog Artifactory. -For information on adding a JFrog Artifactory integration in Codefresh, see [Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/). - - - -### JFrog Artifactory integration settings in Codefresh - -{: .table .table-bordered .table-hover} -| Setting | Description | -| ---------- | -------- | -| **Integration name** | A friendly name for the integration. This is the name you will reference in the third-party CI platform/tool. | -| **All Runtimes/Selected Runtimes** | {::nomarkdown} The runtimes in the account with which to share the integration resource.
                                                        The integration resource is created in the Git repository with the shared configuration, within resources. The exact location depends on whether the integration is shared with all or specific runtimes:
                                                        • All runtimes: Created in resources/all-runtimes-all-clusters/
                                                        • Selected runtimes: Created in resources/runtimes//
                                                        {:/}| -| **Server Name** | The URL of the JFrog Artifactory server instance.| -| **Username** | The JFrog Artifactory username.| -| **Password** | The JFrog Artifactory password.| -|**Test Connection** | Click to verify that you can connect to the specified instance before you commit changes. | - - - {% include - image.html - lightbox="true" - file="/images/integrations/jfrog/jfrog-int-settings.png" - url="/images/integrations/jfrog/jfrog-int-settings.png" - alt="JFrog Artifactory container registry integration" - caption="JFrog Artifactory container registry integration" - max-width="50%" - %} - -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking]({{site.baseurl}}/docs/integrations/issue-tracking/) diff --git a/_docs/integrations/container-registries/quay.md b/_docs/integrations/container-registries/quay.md deleted file mode 100644 index dfe9cf81..00000000 --- a/_docs/integrations/container-registries/quay.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -title: "Quay" -description: "" -group: integrations -sub_group: container-registries -toc: true ---- - -Codefresh has native support for interacting with Quay registries, from where you can push, pull, and deploy images. -Adding a Quay integration allows you to reference the integration in external CI tools such as GitHub Actions by the name of the integration account, instead of adding explicit credentials. See [Image enrichment overview]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). - - -### Prerequisites - -1. [Create a Redhat/Quay account at Quay](https://quay.io/){:target="\_blank"}. -1. Optional. For Codefresh integration, [create a robot account](https://docs.quay.io/glossary/robot-accounts.html){:target="\_blank"}. - -### Quay integration settings in Codefresh - -The table describes the arguments required to integrate Quay in Codefresh. - -{: .table .table-bordered .table-hover} -| Setting | Description | -| ---------- | -------- | -| **Integration name** | A friendly name for the integration. This is the name you will reference in the third-party CI platform/tool. | -| **All Runtimes/Selected Runtimes** | {::nomarkdown} The runtimes in the account with which to share the integration resource.
                                                        The integration resource is created in the Git repository with the shared configuration, within resources. The exact location depends on whether the integration is shared with all or specific runtimes:
                                                        • All runtimes: Created in resources/all-runtimes-all-clusters/
                                                        • Selected runtimes: Created in resources/runtimes//
                                                        You can reference the Docker Hub integration in the CI tool. {:/}| -|**Domain**| Set to `quay.io`.| -|**Username**| The Quay.io username.| -|**Password**| The Quay.io encrypted password, or robot account if you created one.| - - {% include image.html - lightbox="true" - file="/images/integrations/quay/quay-int-settings.png" - url="/images/integrations/quay/quay-int-settings.png" - alt="Quay Docker Registry integration settings in Codefresh" - caption="Quay Docker Registry integration settings in Codefresh" - max-width="50%" - %} - -For how-to instructions, see [Configure container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#configure-container-registry-integrations-in-codefresh) and [Edit/delete container registry integrations in Codefresh]({{site.baseurl}}/docs/integrations/container-registries/#editdelete-container-registry-integrations). - -Make sure you have the: -* Quay domain username -* Quay domain-encrypted password or that of the robot account - - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/) diff --git a/_docs/integrations/image-enrichment-overview.md b/_docs/integrations/image-enrichment-overview.md deleted file mode 100644 index cf2d9ca2..00000000 --- a/_docs/integrations/image-enrichment-overview.md +++ /dev/null @@ -1,75 +0,0 @@ ---- -title: "Image enrichment with integrations" -description: "" -group: integration -toc: true ---- - - - - -Image enrichment is a crucial part of the CI/CD process, adding to the quality of deployments. Image enrichment exposes metadata such as feature requests, pull requests, and logs as part of the application's deployment, for visibility into all aspects of the deployment, making it easier to track actions and identify root cause of failures. - -If you have your CI tools and our Hosted GitOps, you can still enrich and report images to the Codefresh platform with no disruptions to existing CI processes and flows. - -Codefresh has new report images templates, optimized to work with third-party CI tools/plaforms for creating pipelines and workflows. Add integration accounts in Codefresh to tools such as Jira, Docker Hub and Quay, and then connect your CI tool with Codefresh for image enrichment and reporting. - - - -### CI integration flow for image enrichment - -Integrate Codefresh with your CI platform/tool account with a unique name per integration account. - -#### 1. Add/configure integration - -Add/configure the integration account for the third-party tools. You can set up multiple integration accounts for the same tool. -When you add an integration, Codefresh creates a Sealed Secret with the integration credentials, and a ConfigMap that references the secret. 
- -See: -* Issue tracking - [JIRA]({{site.baseurl}}/docs/integrations/issue-tracking/jira/) - -* Container registries - [Amazon ECR]({{site.baseurl}}/docs/integrations/container-registries/amazon-ecr/) - [DockerHub]({{site.baseurl}}/docs/integrations/container-registries/dockerhub/) - [JFrog Artifactory]({{site.baseurl}}/docs/integrations/container-registries/jfrog/) - [Quay]({{site.baseurl}}/docs/integrations/container-registries/quay/) - -We are working on supporting integrations for more tools. Stay tuned for the release announcements. -For image enrichment with a tool that is as yet unsupported, you must define the explicit credentials. - -#### 2. Connect CI platform/tool to Codefresh - -Connect a CI platform/tool to Codefresh with an API token for the runtime cluster, the integration accounts, and image information for enrichment and reporting. - -[Codefresh Classic]({{site.baseurl}}/docs/integrations/ci-integrations/codefresh-classic/) -[GitHub Actions]({{site.baseurl}}/docs/integrations/ci-integrations/github-actions/) -[Jenkins]({{site.baseurl}}/docs/integrations/ci-integrations/jenkins/) - - -#### 3. Add the enrichment step for the CI platform/tool to your GitHub Actions pipeline - -Finally, add the enrichment step to your CI pipeline with the API token and integration information. Codefresh uses the integration name to get the corresponding Sealed Secret to securely access and retrieve the information for image enrichment. - - [GitHub Action Codefresh report image](https://github.com/marketplace/actions/codefresh-report-image){:target="\_blank"}. - [Codefresh Classic Codefresh report image](https://codefresh.io/steps/step/codefresh-report-image){:target="\_blank"}. - - -#### 4. View enriched image information -Once deployed, view enriched information in the Codefresh UI: -* Go to [Images](https://g.codefresh.io/2.0/images){:target="\_blank"} -* Go to the [Applications dashboard](https://g.codefresh.io/2.0/applications-dashboard){:target="\_blank"} - - -View: - -* Commit information as well as committer -* Links to build and deployment pipelines -* PRs included in the deployment -* Jira issues, status and details for each deployment - - -### Related articles -[Images]({{site.baseurl}}/docs/deployment/images/) -[Applications dashboard]({{site.baseurl}}/docs/deployment/applications-dashboard/) - diff --git a/_docs/integrations/issue-tracking.md b/_docs/integrations/issue-tracking.md deleted file mode 100644 index 60ea2d5a..00000000 --- a/_docs/integrations/issue-tracking.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: "Issue tracking integrations" -description: "" -group: integrations -toc: true ---- - -One of the major highlights of the Codefresh platform is the ability to automatically correlate -software features with their deployment (where and when). While the software version of a component is easily identifiable, what is likely more interesting and important is to know which features are included in a release. - -Adding an issue-tracking integration in Codefresh allows you to reference the integration in third-party CI platforms/tools such as GitHub Actions and Codefresh Classic by the name of the integration, instead of explicit credentials. See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) and [CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/). 
- -You add an issue-tracking integration in Codefresh by: -* Defining the integration name -* Selecting the runtime or runtimes it is shared with -* Defining the arguments -* Committing the changes - -Once added, Codefresh displays the list of existing integrations with their sync status. You can edit or delete any integration. -### Configure container registry integrations in Codefresh -Configure the settings for a container registry integration in Codefresh. - -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. -1. Filter by **Issue Tracking**, select the issue tracking tool to integrate, and click **Configure**. -1. Jira integrations only: For a new Jira integration, from the **Add Integration** dropdown, select the type of integration, as either **Deployment reporting** or **Image enrichment**. -1. If you already have integrations, click **Add**. -1. Define the arguments for the issue tracking tool: - [Jira]({{site.baseurl}}/docs/integrations/issue-tracking/jira/) -1. To confirm, click **Commit**. - It may take a few moments for the new integration to be synced to the cluster before it appears in the list. - -### Integration resource in shared configuration repo -The resource for the issue-tracking integration is created in the Git repository with the shared configuration, within `resources`. -The exact location depends on whether the integration is shared with all or specific runtimes: -* All runtimes: Created in `resources/all-runtimes-all-clusters/` -* Selected runtimes: Created in `resources/runtimes//` - -### View issue-tracking integrations -Selecting an issue tracking tool displays the existing integrations in Codefresh. - - -Every issue tracking integration displays the following information: -* Name of the integration -* Runtime or runtimes it is shared with -* Sync status - -### Edit/delete issue-tracking integrations in Codefresh -If you have existing integrations, you can change the credentials, or delete an integration. ->Deleting an integration deletes the integration resource from the shared configuration Git repo, its secrets, the CI workflows that -use it. - -1. In the Codefresh UI, go to [Integrations](https://g.codefresh.io/2.0/account-settings/integrations){:target="\_blank"}. -1. Filter by **Issue Tracking**, and select the specific integration. -1. In the row with the integration to edit or delete, click the three dots and select **Edit** or **Delete**. -1. To edit, update the **Username** and **Password** fields, and click **Test Connection** to verify the account credentials. -1. To delete, type **DELETE** in the text box as instructed. 
- - {% include - image.html - lightbox="true" - file="/images/integrations/jira/jira-delete.png" - url="/images/integrations/jira/jira-delete.png" - alt="Delete issue-tracking integration" - caption="Delete issue-tracking integration" - max-width="50%" - %} - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) - diff --git a/_docs/integrations/issue-tracking/jira.md b/_docs/integrations/issue-tracking/jira.md deleted file mode 100644 index fd223281..00000000 --- a/_docs/integrations/issue-tracking/jira.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: "Jira" -description: " " -group: integrations -sub_group: issue-tracking -toc: true ---- - - -Codefresh has native integration for Atlassian Jira, to enrich images with information from Jira. Codefresh can monitor a feature all the way from the ticket creation phase, up to when it is implemented and deployed to an environment. - -For information on adding a Jira integration in Codefresh, see [Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/). - - -### Prerequisites - -1. Get your Jira instance credentials by following the [Atlassian documentation](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/){:target="\_blank"}. -1. Note down the following as you will need them to complete the integration with Codefresh: - * Jira URL - * Jira username/email to be used for the integration - * Jira password/token created for this user - - -### Jira integration settings in Codefresh - -The table describes the arguments required to integrate Jira in Codefresh. - -{: .table .table-bordered .table-hover} -| Setting | Description | -| ---------- | -------- | -| **Integration name** | A friendly name for the integration. This is the name you will reference in the third-party CI platform/tool. | -| **All Runtimes/Selected Runtimes** | {::nomarkdown} The runtimes in the account with which to share the integration resource.
                                                        The integration resource is created in the Git repository with the shared configuration, within resources. The exact location depends on whether the integration is shared with all or specific runtimes:
                                                        • All runtimes: Created in resources/all-runtimes-all-clusters/
                                                        • Selected runtimes: Created in resources/runtimes//
                                                        You can reference the Docker Hub integration in the CI tool. {:/}| -|**Jira Host**| The URL of your Jira instance. For example, `https://.atlassian.net`| -|**API Token**| The Jira password/token you noted down when you created the Jira instance.| -|**API Email**| The email for the API token.| - - - {% include - image.html - lightbox="true" - file="/images/integrations/jira/jira-int-settings.png" - url="/images/integrations/jira/jira-int-settings.png" - alt="JIRA integration in Codefresh" - caption="JIRA integration in Codefresh" - max-width="60%" -%} - -For information on adding a Jira integration in Codefresh, see [Issue-tracking integrations]({{site.baseurl}}/docs/integrations/issue-tracking/). - -### Using Jira integration in pipelines -For pipelines based on GitHub Actions, configure the Jira integration in Codefresh, and then connect your GitHub Action to Codefresh, referencing the Jira integration by name. -Codefresh uses the Secret Key stored in the runtime cluster to securely access Jira and retrieve the information. - -### Related articles -[Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/) -[CI integrations]({{site.baseurl}}/docs/integrations/ci-integrations/) -[Container registry integrations]({{site.baseurl}}/docs/integrations/container-registries/) diff --git a/_docs/migration/gitops-dashboard.md b/_docs/migration/gitops-dashboard.md deleted file mode 100644 index 17429bac..00000000 --- a/_docs/migration/gitops-dashboard.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "GitOps dashboard" -description: "" -group: migration -toc: true ---- - -Coming soon diff --git a/_docs/migration/pipelines.md b/_docs/migration/pipelines.md deleted file mode 100644 index 176f096c..00000000 --- a/_docs/migration/pipelines.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Pipelines" -description: "" -group: migration -toc: true ---- - -Coming soon diff --git a/_docs/pipelines/advanced-workflows.md b/_docs/pipelines/advanced-workflows.md new file mode 100644 index 00000000..1314ff7f --- /dev/null +++ b/_docs/pipelines/advanced-workflows.md @@ -0,0 +1,972 @@ +--- +title: "Advanced workflows with parallel steps" +description: "Create complex workflows in Codefresh with step dependencies" +group: codefresh-yaml +toc: true +--- + +Codefresh is very flexible when it comes to pipeline complexity and depth. + +You can easily create: + * Sequential pipelines where step order is the same as the listing order in YAML (simple) + * Sequential pipelines that have some parallel parts (intermediate) + * Parallel pipelines where step order is explicitly defined (advanced) + +With the parallel execution mode, you can define complex pipelines with fan-in/out configurations capable of matching even the most complicated workflows within an organization. + +>In Codefresh, parallel execution is unrelated to [stages]({{site.baseurl}}/docs/codefresh-yaml/stages/). Stages are only a way to visually organize your pipeline steps. The actual execution is independent from the visual layout in the logs view. + +Before going any further make sure that you are familiar with the [basics of Codefresh pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/). + +Codefresh offers two modes of execution: + +1. Sequential mode (which is the default) +1. 
Parallel mode + +## Sequential execution mode + +The sequential mode is very easy to understand and visualize. + +In sequential mode, the Codefresh execution engine starts from the first step defined at the top of the `codefresh.yml` file, and executes all steps one by one going down to the end of the file. A step is either executed or skipped according to its conditions. + +>The condition for each step is only examined **once**. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +mode: sequential +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: sample-python-image + working_directory: ./ + tag: ${{CF_BRANCH_TAG_NORMALIZED}} + dockerfile: Dockerfile + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +Here we have two steps, one that creates a Docker image and a second one that runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) inside it. The order of execution is identical to the order of the steps in the YAML file. This means that unit tests will always run after the Docker image creation. + +Notice that the line `mode: sequential` is shown only for illustration purposes. Sequential mode is the default, and therefore this line can be omitted. + + +## Inserting parallel steps in a sequential pipeline + +You don't have to activate parallel execution mode for the whole pipeline if only a part of it needs to run in parallel. Codefresh allows you insert a parallel phase inside a sequential pipeline with the following syntax: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_task1: + title: My Task 1 + [...] + my_parallel_tasks: + type: parallel + steps: + my_task2a: + title: My Task 2A + [...] + my_task2b: + title: My Task 2B + [...] + my_task3: + title: My Task3 + [...] +{% endraw %} +{% endhighlight %} + + +In this case tasks 2A and 2B will run in parallel. +The step name that defines the parallel phase (`my_parallel_tasks` in the example above), is completely arbitrary. + +The final order of execution will be + +1. Task 1 +1. Task 2A and Task2B at the same time +1. Task 3 + +This is the recommended way to start using parallelism in your Codefresh pipelines. It is sufficient for most scenarios that require parallelism. + +>The step names must be unique within the same pipeline. The parent and child steps should NOT share the same name. + +### Example: pushing multiple Docker images in parallel + +Let's see an example where a Docker image is created and then we push it to more than one registry. This is a perfect candidate for parallelization. 
Here is the `codefresh.yml`: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- build +- push +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'build' + type: build + image_name: trivialgoweb + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + PushingToRegistries: + type: parallel + stage: 'push' + steps: + jfrog_PushingTo_jfrog_BintrayRegistry: + type: push + title: jfrog_Pushing To Bintray Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: bintray + PushingToGoogleRegistry: + type: push + title: Pushing To Google Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: gcr + PushingToDockerRegistry: + type: push + title: Pushing To Dockerhub Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + image_name: kkapelon/trivialgoweb + registry: dockerhub +{% endraw %} +{% endhighlight %} + +The order of execution is the following: + +1. MyAppDockerImage ([build step]({{site.baseurl}}/docs/pipelines/steps/build/)) +1. jfrog_PushingTo_jfrog_BintrayRegistry, PushingToGoogleRegistry, PushingToDockerRegistry ([push steps]({{site.baseurl}}/docs/pipelines/steps/push/)) + +The pipeline view for this yaml file is the following. + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/parallel-push.png" +url="/images/codefresh-yaml/parallel-push.png" +alt="Parallel Docker push" +caption="Parallel Docker push" +max-width="80%" +%} + +As you can see we have also marked the steps with [stages]({{site.baseurl}}/docs/pipelines/stages/) so that we get a visualization that matches the execution. + + +### Example: Running multiple test suites in parallel + +All types of steps can by placed inside a parallel phase. Another common use case would be the parallel execution of [freestyle steps]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) for unit/integration tests. + +Let's say that you have a Docker image with a Python back-end and a JavaScript front-end. You could run both types of tests in parallel with the following yaml syntax: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-full-stack-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + MyTestingPhases: + type: parallel + steps: + my_back_end_tests: + title: Running Back end tests + image: ${{MyAppDockerImage}} + commands: + - python setup.py test + my_front_end_tests: + title: Running Front End tests + image: ${{MyAppDockerImage}} + commands: + - npm run test +{% endraw %} +{% endhighlight %} + +Running different types of tests (unit/integration/load/acceptance) in parallel is a very common use case for parallelism inside an otherwise sequential pipeline. + +### Defining success criteria for a parallel step + +By default, any failed step in a Codefresh pipeline will fail the whole pipeline. There are ways to change this behavior (the `fail_fast` property is explained later in this page), but specifically for parallel steps you can define exactly when the whole step succeeds or fails. 
+ +You can define steps that will be used to decide if a parallel step succeeds with this syntax: + +{% highlight yaml %} +second_step: + title: Second step + success_criteria: + steps: + only: + - my_unit_tests + type: parallel + steps: + my_unit_tests: + title: Running Back end tests + image: node + commands: + - npm run test + my_integration_tests: + title: Running Integration tests + image: node + commands: + - npm run int-test + my_acceptance_tests: + title: Running Acceptance tests + image: node + commands: + - npm run acceptance-test +{% endhighlight %} + +In the example above, if integration and/or acceptance tests fail, the whole pipeline will continue, because we have defined that only the results of unit test matter for the whole parallel step. + +The reverse relationship (i.e., defining steps to be ignored) can be defined with the following syntax + +{% highlight yaml %} +second_step: + title: Second step + success_criteria: + steps: + ignore: + - my_integration_tests + - my_acceptance_tests + type: parallel + steps: + my_unit_tests: + title: Running Back end tests + image: node + commands: + - npm run test + my_integration_tests: + title: Running Integration tests + image: node + commands: + - npm run int-test + my_acceptance_tests: + title: Running Acceptance tests + image: node + commands: + - npm run acceptance-test +{% endhighlight %} + +In the example above we have explicitly defined that even if the integration or acceptance tests fail the whole pipeline will continue. + +### Shared Codefresh volume and race conditions + +In any pipeline step, Codefresh automatically attaches a [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) that is used to transfer artifacts between steps. The same volume is also shared between steps that run in parallel. + + +Here is an example where two parallel steps are writing two files. After they finish execution, we list the contents of the project folder. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + WritingInParallel: + type: parallel + steps: + writing_file_1: + title: Step1A + image: alpine + commands: + - echo "Step1A" > first.txt + writing_file_2: + title: Step1B + image: alpine + commands: + - echo "Step1B" > second.txt + MyListing: + title: Listing of files + image: alpine + commands: + - ls +{% endraw %} +{% endhighlight %} + +The results from the `MyListing` step is the following: + +``` +first.txt second.txt +``` + +This illustrates the side effects for both parallel steps that were executed on the same volume. + +>It is therefore your responsibility to make sure that steps that run in parallel play nice with each other. Currently, Codefresh performs no conflict detection at all. If there are race conditions between your parallel steps, (e.g. multiple steps writing at the same files), the final behavior is undefined. It is best to start with a fully sequential pipeline, and use parallelism in a gradual manner if you are unsure about the side effects of your steps + +## Implicit parallel steps +> If you use implicit parallel steps, you _cannot_ use _parallel pipeline mode_. + +In all the previous examples, all parallel steps have been defined explicitly in a pipeline. This works well for a small number of steps, but in some cases it can be cumbersome to write such a pipeline, especially when the parallel steps are similar. 
+ +Codefresh offers two handy ways to lessen the amount of YAML you have to write and get automatic parallelization with minimum effort. + +* The `scale` syntax allows you to quickly create parallel steps that are mostly similar (but still differ) +* The `matrix` syntax allows you to quickly create parallel steps for multiple combinations of properties + +### Scale parallel steps (one dimension) + +If you look back at the parallel docker push example you will see that all push steps are the same. The only thing that changes is the registry that they push to. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- build +- push +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'build' + type: build + image_name: trivialgoweb + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + PushingToRegistries: + type: parallel + stage: 'push' + steps: + jfrog_PushingTo_jfrog_BintrayRegistry: + type: push + title: jfrog_Pushing To Bintray Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: bintray + PushingToGoogleRegistry: + type: push + title: Pushing To Google Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + registry: gcr + PushingToDockerRegistry: + type: push + title: Pushing To Dockerhub Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + image_name: kkapelon/trivialgoweb + registry: dockerhub +{% endraw %} +{% endhighlight %} + + +This pipeline can be simplified by using the special `scale` syntax to create a common parent step with all similarities: + + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- build +- push +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'build' + type: build + image_name: trivialgoweb + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + PushingToRegistries: + stage: 'push' + type: push + tag: '${{CF_SHORT_REVISION}}' + candidate: ${{MyAppDockerImage}} + scale: + jfrog_PushingTo_jfrog_BintrayRegistry: + registry: bintray + PushingToGoogleRegistry: + registry: gcr + PushingToDockerRegistry: + image_name: kkapelon/trivialgoweb + registry: dockerhub +{% endraw %} +{% endhighlight %} + +You can see now that all common properties are defined once in the parent step (`PushingToRegistries`) while each push step only contains what differs. Codefresh will automatically create parallel steps when it encounters the `scale` syntax. + +The resulting pipeline is more concise but runs in the same manner as the original YAML. For a big number of parallel steps, the `scale` syntax is very helpful for making the pipeline definition more clear. + +You can use the `scale` syntax with all kinds of steps in Codefresh and not just push steps. Another classic example would be running tests in parallel with different environment variables. + + +`YAML` +{% highlight yaml %} +{% raw %} + run_tests_in_parallel: + stage: 'Microservice A' + working_directory: './my-front-end-code' + image: node:latest + commands: + - npm run test + scale: + first: + environment: + - TEST_NODE=0 + second: + environment: + - TEST_NODE=1 + third: + environment: + - TEST_NODE=2 + fourth: + environment: + - TEST_NODE=3 +{% endraw %} +{% endhighlight %} + +This pipeline will automatically create 4 parallel freestyle steps. 
All of them will use the same Docker image and executed the same command (`npm run test`) but each one will receive a different value for the environment variable called `TEST_NODE`. + +Notice that if you define environment variables on the parent step (`run_tests_in_parallel` in the example above), they will also be available on the children parallel steps. And if those define, environment variables as well, all environment variables will be available. + + +### Matrix parallel steps (multiple dimensions) + +The `scale` syntax allows you to easily create multiple parallel steps that differ only in a single dimension. If you have multiple dimensions of properties that differ and you want to run all possible combinations (Cartesian product) then the `matrix` syntax will do that for you automatically. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/cf-example-unit-test' + revision: 'master' + git: github + stage: prepare + run_my_tests_before_build: + stage: test + working_directory: './golang-app-A' + commands: + - go test -v + matrix: + image: + - golang:1.11 + - golang:1.12 + - golang:1.13 + environment: + - [CGO_ENABLED=1] + - [CGO_ENABLED=0] +{% endraw %} +{% endhighlight %} + +Here we want run unit tests with 3 different versions of GO and also try with CGO enabled or not. Instead of manually writing 6 parallel steps in your pipeline with all possible combinations, we can simply use the `matrix` syntax to create the following parallel steps: + +* Go 1.11 with CGO enabled +* Go 1.11 with CGO disabled +* Go 1.12 with CGO enabled +* Go 1.12 with CGO disabled +* Go 1.13 with CGO enabled +* Go 1.13 with CGO disabled + +The resulting Codefresh YAML is much more compact. Notice that because the `environment` property in Codefresh is already an array on its own, when we use it with the `matrix` syntax we need to enclose its value with `[]` (array of arrays). + +You can add more dimensions to a matrix build (and not just two as shown in the example). Here is another example with 3 dimensions: + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test +steps: + main_clone: + title: Cloning main repository... + stage: prepare + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: master + git: github + MyUnitTests: + stage: test + matrix: + image: + - 'maven:3.5.2-jdk-8-alpine' + - 'maven:3.6.2-jdk-11-slim' + - 'maven:3-jdk-8' + commands: + - ["mvn --version", "mvn -Dmaven.repo.local=/codefresh/volume/m2_repository test"] + - ["mvn --version", "mvn -Dmaven.test.skip -Dmaven.repo.local=/codefresh/volume/m2_repository package"] + environment: + - [MAVEN_OPTS=-Xms1024m] + - [MAVEN_OPTS=-Xms512m] +{% endraw %} +{% endhighlight %} + +This pipeline creates 3 x 2 x 2 = 12 parallel steps with all the possible combinations of: + +* Maven version +* Running or disabling tests +* Using 1GB or 512MBs of memory. + +Remember that all parallel steps run within the same pipeline executor so make sure that you have enough resources as the number +of matrix variations can quickly grow if you add too many dimensions. + +Notice that, as with the `scale` syntax, the defined values/properties are merged between parent step (`MyUnitTests` in the example above) and children steps. 
For example, if you set an environment variable on the parent and also on child matrix steps , the result will a merged environment where all values are available. + +## Parallel pipeline execution +> If you use parallel execution mode for pipelines, you _cannot_ use _implicit parallel steps_. + +To activate advanced parallel mode for the whole pipeline, you need to declare it explicitly at the root of the `codefresh.yml` file: + +``` +version: '1.0' +mode: parallel +steps: +[...] +``` + +In full parallel mode, the order of steps inside the `codefresh.yml` **does not** affect the order of execution at all. The Codefresh pipeline engine instead: + +1. Evaluates all step-conditions *at the same* time +2. Executes those that have their requirements met +3. Starts over with the remaining steps +4. Stops when there are no more steps to evaluate + +This means that in parallel mode the conditions of a step are evaluated **multiple times** as the Codefresh execution engine tries to find which steps it should run next. This implication is very important when you try to understand the order of step execution. + +Notice also that in parallel mode, if you don't define any step conditions, Codefresh will try to run **all** steps at once, which is probably not what you want in most cases. + +With parallel mode you are expected to define the order of steps in the yaml file, and the Codefresh engine will create a *graph* of execution that satisfies your instructions. This means that writing the `codefresh.yml` file requires more effort on your part, but on the other hand allows you to define the step order in ways not possible with the sequential mode. You also need to define which steps should depend on the automatic cloning of the pipeline (which is special step named `main_clone`). + +In the next sections we describe how you can define the steps dependencies in a parallel pipeline. + +### Single step dependencies + +At the most basic level, you can define that a step *depends on* the execution of another step. This dependency is very flexible as Codefresh allows you run a second step once: + +1. The first step is finished with success +1. The first step is finished with failure +1. The first completes (regardless of exit) status + +The syntax for this is the following post-condition: + +{% highlight yaml %} +second_step: + title: Second step + when: + steps: + - name: first_step + on: + - success +{% endhighlight %} + +If you want to run the second step only if the first one fails the syntax is: + +{% highlight yaml %} +second_step: + title: Second step + when: + steps: + - name: first_step + on: + - failure +{% endhighlight %} + +Finally, if you don't care about the completion status the syntax is: + +{% highlight yaml %} +second_step: + title: Second step + when: + steps: + - name: first_step + on: + - finished +{% endhighlight %} + +Notice that `success` is the default behavior so if you omit the last two lines (i.e., the `on:` part) the second step +will wait for the next step to run successfully. + +>Also notice that the name `main_clone` is reserved for the automatic clone that takes place in the beginning of pipelines that are linked to a git repository. You need to define which steps depend on it (probably the start of your graph) so that `git checkout` happens before the other steps. + +As an example, let's assume that you have the following steps in a pipeline: + +1. A build step that creates a Docker image +1. 
A freestyle step that runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) inside the Docker image +1. A freestyle step that runs [integrations tests]({{site.baseurl}}/docs/testing/integration-tests/) *After* the unit tests, even if they fail +1. A cleanup step that runs after unit tests if they fail + +Here is the full pipeline. Notice the explicit dependency to the `main_clone` step that checks out the code. + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +mode: parallel +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-node-js-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + when: + steps: + - name: main_clone + on: + - success + MyUnitTests: + title: Running unit tests + image: ${{MyAppDockerImage}} + fail_fast: false + commands: + - npm run test + when: + steps: + - name: MyAppDockerImage + on: + - success + MyIntegrationTests: + title: Running integration tests + image: ${{MyAppDockerImage}} + commands: + - npm run integration-test + when: + steps: + - name: MyUnitTests + on: + - finished + MyCleanupPhase: + title: Cleanup unit test results + image: alpine + commands: + - ./cleanup.sh + when: + steps: + - name: MyUnitTests + on: + - failure +{% endraw %} +{% endhighlight %} + +If you run the pipeline you will see that Codefresh automatically understands that `MyIntegrationTests` and `MyCleanupPhase` can run in parallel right after the unit tests finish. + +Also notice the `fail_fast: false` line in the unit tests. By default, if *any* steps fails in a pipeline the whole pipeline is marked as a failure. With the `fail_fast` directive we can allow the pipeline to continue so that other steps that depend on the failed step can still run even. + + +### Multipl step dependencies + +A pipeline step can also depend on multiple other steps. + +The syntax is: + +{% highlight yaml %} +third_step: + title: Third step + when: + steps: + all: + - name: first_step + on: + - success + - name: second_step + on: + - finished +{% endhighlight %} + +In this case, the third step will run only when BOTH first and second are finished (and first is actually a success) + +*ALL* is the default behavior so it can be omitted if this is what you need. The example above +is example the same as below: + +{% highlight yaml %} +third_step: + title: Third step + when: + steps: + - name: first_step + on: + - success + - name: second_step + on: + - finished +{% endhighlight %} + +Codefresh also allows you to define *ANY* behavior in an explicit manner: + +{% highlight yaml %} +third_step: + title: Third step + when: + steps: + any: + - name: first_step + on: + - success + - name: second_step + on: + - finished +{% endhighlight %} + +Here the third step will run when either the first one *OR* the second one have finished. + +As an example let's assume this time that we have: + +1. A build step that creates a docker image +1. Unit tests that will run when the docker image is ready +1. Integration tests that run either after unit tests or if the docker image is ready (contrived example) +1. 
A cleanup step that runs when both kinds of tests are finished + +Here is the full pipeline + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +mode: parallel +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-node-js-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + MyUnitTests: + title: Running unit tests + image: ${{MyAppDockerImage}} + fail_fast: false + commands: + - npm run test + when: + steps: + - name: MyAppDockerImage + on: + - success + MyIntegrationTests: + title: Running integration tests + image: ${{MyAppDockerImage}} + commands: + - npm run integration-test + when: + steps: + any: + - name: MyUnitTests + on: + - finished + - name: MyAppDockerImage + on: + - success + MyCleanupPhase: + title: Cleanup unit test results + image: alpine + commands: + - ./cleanup.sh + when: + steps: + all: + - name: MyUnitTests + on: + - finished + - name: MyIntegrationTests + on: + - finished +{% endraw %} +{% endhighlight %} + +In this case Codefresh will make sure that cleanup happens only when both unit and integration tests are finished. + + +### Custom step dependencies + +For maximum flexibility you can define a custom condition for a step. + +It is hard to describe all possible cases, because Codefresh supports a [mini DSL]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/#condition-expression-syntax) for conditions. All examples mentioned in conditional execution are still valid in parallel pipelines. + +For example, run this step only if a PR is opened against the production branch: + +{% highlight yaml %} +{% raw %} +my_step: + title: My step + when: + condition: + all: + validateTargetBranch: '"${{CF_PULL_REQUEST_TARGET}}" == "production"' + validatePRAction: '''${{CF_PULL_REQUEST_ACTION}}'' == ''opened''' +{% endraw %} +{% endhighlight %} + +Run this step only for the master branch and when the commit message does not include "skip ci": + +{% highlight yaml %} +{% raw %} +my_step: + title: My step + when: + condition: + all: + noSkipCiInCommitMessage: 'includes(lower("${{CF_COMMIT_MESSAGE}}"), "skip ci") == false' + masterBranch: '"${{CF_BRANCH}}" == "master"' +{% endraw %} +{% endhighlight %} + +You can now add extra conditions regarding the completion state of specific steps. A global object called `steps` contains all steps by name along with a `result` property with the following possible completion states: + +* Success +* Failure +* Skipped (only valid in sequential mode) +* Finished (regardless of status) +* Pending +* Running + +Finished is a shorthand for `success` or `failure` or `skipped`. It is only valid when used in [step dependencies]({{site.baseurl}}/docs/codefresh-yaml/advanced-workflows/#single-step-dependencies), and cannot be used in custom conditions. + +You can mix and match completion states from any other step in your pipeline. Here are some examples: + +{% highlight yaml %} +my_step: + title: My step + when: + condition: + all: + myCondition: steps.MyUnitTests.result == 'failure' || steps.MyIntegrationTests.result == 'failure' +{% endhighlight %} + +{% highlight yaml %} +my_step: + title: My step + when: + condition: + any: + myCondition: steps.MyLoadTesting.result == 'success' + myOtherCondition: steps.MyCleanupStep.result == 'success' +{% endhighlight %} + +You can also use conditions in the success criteria for a parallel step. 
Here is an example + +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- start +- tests +- cleanup +steps: + MyAppDockerImage: + stage: 'start' + title: Building Docker Image + type: build + image_name: my-full-stack-app + working_directory: ./01_sequential/ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: Dockerfile + MyTestingPhases: + type: parallel + stage: 'tests' + success_criteria: + condition: + all: + myCondition: ${{steps.my_back_end_tests.result}} === 'success' && ${{steps.my_front_end_tests.result}} === 'success' + steps: + my_back_end_tests: + title: Running Back end tests + image: ${{MyAppDockerImage}} + commands: + - exit 1 + my_front_end_tests: + title: Running Front End tests + image: ${{MyAppDockerImage}} + commands: + - echo "Second" + MyCleanupPhase: + stage: 'cleanup' + title: Cleanup unit test results + image: alpine + commands: + - echo "Finished" +{% endraw %} +{% endhighlight %} + + +## Handling error conditions in a pipeline + +It is important to understand the capabilities offered by Codefresh when it comes to error handling. You have several options in different levels of granularity to select what constitutes a failure and what not. + +By default, *any* failed step in a pipeline will abort the whole pipeline and mark it as failure. + +You can use the directive `fail_fast: false`: +* In a specific step to mark it as ignored if it fails +* At the root level of the pipeline if you want to apply it to all steps + +Therefore, if you want your pipeline to keep running to completion regardless of errors the following syntax is possible: + +``` +version: '1.0' +fail_fast: false +steps: +[...] +``` + +You also have the capability to define special steps that will run when the whole pipeline has a special completion status. Codefresh offers a special object called `workflow` that represents the whole pipeline and allows you to evaluate its status in a step. + +For example, you can have a cleanup step that will run only if the workflow fails (regardless of the actual step that created the error) with the following syntax: + +{% highlight yaml %} +my_cleanup_step: + title: My Pipeline Cleanup + when: + condition: + all: + myCondition: workflow.result == 'failure' +{% endhighlight %} + +As another example we have a special step that will send an email if the pipeline succeeds or if load-tests fail: + +{% highlight yaml %} +my_email_step: + title: My Email step + when: + condition: + any: + myCondition: workflow.result == 'success' + myTestCondition: steps.MyLoadTesting.result == 'failure' +{% endhighlight %} + +Notice that both examples assume that `fail_fast: false` is at the root of the `codefresh.yaml` file. + +The possible values for `workflow.result` are: + +* `running` +* `terminated` +* `failure` +* `pending-approval` +* `success` + + +## Related articles +[Variables in pipelines]({{site.baseurl}}/docs/pipelines/variables/) +[Hooks in pipelines]({{site.baseurl}}/docs/pipelines/hooks/) + + + + + + + + diff --git a/_docs/pipelines/annotations.md b/_docs/pipelines/annotations.md new file mode 100644 index 00000000..2adab184 --- /dev/null +++ b/_docs/pipelines/annotations.md @@ -0,0 +1,301 @@ +--- +title: "Annotations in CI pipelines" +description: "Mark your builds and projects with extra annotations" +group: codefresh-yaml +toc: true +--- + +Codefresh supports the annotations of several entities with custom annotations. You can use these annotations to store any optional information that you wish to keep associated with each entity. 
Examples would be storing the test coverage for a particular build, or a special settings file for a pipeline. + +Currently Codefresh supports extra annotations for: + +* Projects +* Pipelines +* Builds +* Docker images + +You can view/edit annotations using the [Codefresh CLI](https://codefresh-io.github.io/cli/annotations/) or directly in the Codefresh Web UI. + +>Notice that the syntax shown in this page is deprecated but still supported. For the new syntax +see [hooks]({{site.baseurl}}/docs/pipelines/hooks/). + + +## Adding annotations + +In the most basic scenario you can use the [post operations]({{site.baseurl}}/docs/pipelines/post-step-operations/) of any Codefresh [step]({{site.baseurl}}/docs/pipelines/steps/) to add annotations: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_custom_step: + title: Adding annotations to a project + image: alpine:3.9 + commands: + - echo "Hello" + on_success: + annotations: + set: + - entity_id: annotate-examples + entity_type: project + annotations: + - my_annotation_example1: 10.45 + - my_empty_annotation + - my_string_annotation: Hello World +{% endraw %} +{% endhighlight %} + + +This pipeline adds three annotations to a project called `annotate-examples`. The name of each annotation can only contain letters (upper and lowercase), numbers and the underscore character. The name of each annotation must start with a letter. + + +For the `entity_id` value you can also use an actual ID instead of a name. The `entity_id` and `entity_type` are define which entity will hold the annotations. The possible entity types are: + +* `project` (for a project, even a different one) +* `pipeline` (for a pipeline, even a different one) +* `build` (for a build, even a different one) +* `image` (for a docker image) + +If you don't define them, then by default the current build will be used with these values: +* `entity_id` is `{% raw %}${{CF_BUILD_ID}}{% endraw %}` (i.e. the current build) +* `entity_type` is `build` + +Here is another example where we add annotations to another pipeline as well as another build (instead of the current one) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_custom_step: + title: Adding annotations to multiple entities + image: alpine:3.9 + commands: + - echo "Hello" + on_success: + annotations: + set: + - entity_id: my-project/my-basic-pipeline + entity_type: pipeline + annotations: + - my_annotation_example1: 10.45 + - my_empty_annotation + - my_string_annotation: Hello World + - entity_id: 5ce2a0e869e2ed0a60c1e203 + entity_type: build + annotations: + - my_coverage: 70 + - my_url_example: http://www.example.com +{% endraw %} +{% endhighlight %} + +It is therefore possible to store annotations on any Codefresh entity (and not just the ones that are connected to the build that is adding annotations). + +## Viewing/editing annotations + +You can view the annotations using the Codefresh CLI + +```shell +codefresh get annotation project annotate-examples +``` + +You can also view annotations within the Codefresh UI. 
+ +For build annotations click the *Annotations* on the build details: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png" +alt="Viewing Build annotations" +caption="Viewing Build annotations" +max-width="80%" +%} + +For pipeline annotations click the *Annotations* button in the pipeline list view: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png" +alt="Viewing Pipeline annotations" +caption="Viewing Pipeline annotations" +max-width="80%" +%} + +For project annotations click the *Annotations* button in the project list view: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png" +alt="Viewing project annotations" +caption="Viewing project annotations" +max-width="80%" +%} + +In all cases you will see a dialog with all existing annotations. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png" +url="/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png" +alt="Editing annotations" +caption="Editing annotations" +max-width="50%" +%} + +You can add additional annotations manually by clicking the *Add annotation* button and entering: + +* The name of the annotation +* The type of the annotation (text, number, percentage, link, boolean) +* The desired value + +Click *Save* to apply your changes. + +## Complex annotation values + +Apart from scalar values, you can also store more complex expressions in annotations. You have access to all [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/), text files from the build and even evaluations from the [expression syntax]({{site.baseurl}}/docs/pipelines/condition-expression-syntax/). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'kostis-codefresh/nestjs-example' + revision: '${{CF_REVISION}}' + my_custom_step: + title: Complex annotations + image: alpine:3.9 + commands: + - echo "Hello" + - echo "Sample content" > /tmp/my-file.txt + on_finish: + annotations: + set: + - entity_id: annotate-examples/simple + entity_type: pipeline + annotations: + - qa: pending + - commit_message: ${{CF_COMMIT_MESSAGE}} + - is_main_branch: + evaluate: "'${{CF_BRANCH}}' == 'main'" + - my_json_file: "file:/tmp/my-file.txt" + - my_docker_file: "file:Dockerfile" +{% endraw %} +{% endhighlight %} + +>Notice that this pipeline is using dynamic git repository variables, so it must be linked to a least one [git trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) in order to work. + +The last two annotations add the text of a file as a value. You can define an absolute or relative path. No processing is done on the file before being stored. If a file is not found, the annotation will still be added verbatim. +We suggest you only store small text files in this manner as annotations values. 
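+If you want to verify the stored values from the command line instead of the UI, you can query the entity that the pipeline above annotated. This is a minimal sketch; it assumes the `codefresh get annotation` command shown earlier also accepts `pipeline` as the entity type, together with the same `annotate-examples/simple` identifier used in the `annotations` block:
+
+```shell
+# Inspect the annotations stored on the pipeline annotated by the example above
+codefresh get annotation pipeline annotate-examples/simple
+```
+
+The file-based annotations (`my_json_file`, `my_docker_file`) should come back exactly as they were read, since no processing is done on the files before they are stored.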
+ +## Removing annotations + +You can also remove annotations by mentioning their name: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_custom_step: + title: Adding annotations to a pipeline + image: alpine:3.9 + commands: + - echo "Hello" + on_success: + annotations: + set: + - entity_id: my-project/my-basic-pipeline + entity_type: pipeline + annotations: + - my_annotation_example1: 10.45 + - my_empty_annotation + - my_string_annotation: Hello World + - my_second_annotation: This one will stay + my_unit_tests: + title: Removing annotations + image: alpine:3.9 + commands: + - echo "Tests failed" + - exit 1 + on_fail: + annotations: + unset: + - entity_id: my-project/my-basic-pipeline + entity_type: pipeline + annotations: + - my_annotation_example1 + - my_empty_annotation + - my_string_annotation +{% endraw %} +{% endhighlight %} + +You can also use both `unset` and `set` block in a single `annotations` block. And of course, you can remove annotations from multiple entities. + +The `unset` annotation can be used with all post-step operations (`on_success`, `on_fail`, `on_finish`). + + +## Adding annotations to the current build/image + +As a convenience feature: + +1. If your pipeline has a build step +1. If you want to add annotations to the present build or image + +you can also define annotations in the root level of the build step and not mention the entity id and type. Annotations will then be added in the present build. + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'kostis-codefresh/nestjs-example' + revision: 'master' + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-app-image + working_directory: ./ + tag: 'sample' + dockerfile: Dockerfile + annotations: + set: + - annotations: + - my_number_annotation: 9999 + - my_empty_annotation + - my_docker_file: "file:Dockerfile" + - my_text_annotation: simple_text +{% endraw %} +{% endhighlight %} + +After running this pipeline at least once, you can retrieve the annotations from any previous build by using the respective id: + +```shell +codefresh get annotation build 5ce26f5ff2ed0edd561fa2fc +``` + +You can also define `entity_type` as `image` and don't enter any `entity_id`. In this case the image created from the build step will be annotated. + + +Note that this syntax is optional. You can still define annotations for a build/image or any other entity using the post operations of any step by mentioning explicitly the target id and type. + +## Related articles +[Image annotations]({{site.baseurl}}/docs/docker-registries/metadata-annotations/) +[Post-step operations]({{site.baseurl}}/docs/pipelines/post-step-operations/) +[Creating CI pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Hooks in CI pipelines]({{site.baseurl}}/docs/pipelines/hooks/) diff --git a/_docs/pipelines/condition-expression-syntax.md b/_docs/pipelines/condition-expression-syntax.md new file mode 100644 index 00000000..8e1ea6c6 --- /dev/null +++ b/_docs/pipelines/condition-expression-syntax.md @@ -0,0 +1,107 @@ +--- +title: "Condition Expression Syntax" +description: "Condition expressions can be included in each step in your codefresh.yml, and must be satisfied for the step to execute." 
+group: codefresh-yaml +redirect_from: + - /docs/condition-expression-syntax/ + - /docs/codefresh-yaml/expression-condition-syntax/ +toc: true +--- +Each step in `codefresh.yml` file can contain conditions expressions that must be satisfied for the step to execute. + +This is a small example of where a condition expression can be used: + `YAML` +{% highlight yaml %} +step-name: + description: Step description + image: image/id + commands: + - bash-command1 + - bash-command2 + when: + condition: + all: + executeForMasterBranch: "{% raw %}'${{CF_BRANCH}}{% endraw %}' == 'master'" +{% endhighlight %} + +A condition expression is a basic expression that is evaluated to true/false (to decide whether to execute or not to execute), and can have the following syntax: + +### Types + +{: .table .table-bordered .table-hover} +| Type | True/False Examples | True/False | +| ------- | ----------------------------------------- | --------------| +| String | True: "hello"
                                                        False: "" | {::nomarkdown}
                                                        • String with content = true
                                                        • Empty string = false
                                                        String comparison is lexicographic.{:/} | +| Number | True: 5
                                                        True: 3.4
                                                        True: 1.79E+308 | {::nomarkdown}
                                                        • Any number other than 0 = true.
                                                        • 0 = false
                                                        {:/} | +| Boolean | True: true
                                                        False: false | {::nomarkdown}
                                                        • True = true
                                                        • False = false
                                                        {:/} | +| Null | False: null | Always false | + +### Variables + +You can use the User Provided variables as explained in [Variables]({{site.baseurl}}/docs/pipelines/variables/), including the [variables +exposed by each individual pipeline step]({{site.baseurl}}/docs/pipelines/variables/#step-variables). + +### Unary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ---------- | --------------------- | +| `-` | Negation of numbers | +| `!` | Logical NOT | + +### Binary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| --------------------------- | ----------- | +| Add, String Concatenation | `+` | +| Subtract | `-` | +| Multiply | `*` | +| Divide | `/` | +| Modulus | `%` | +| Logical AND | `&&` | +| Logical OR | `||` | + +### Comparisons + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ----------- | ---------------------- | +| `==` | Equal to | +| `!=` | Not equal to | +| `>` | Greater than | +| `>=` | Greater than or equal | +| `<` | Less than | +| `<=` | Less than or equal | + +### Functions + +{: .table .table-bordered .table-hover} +| Function Name | Parameters | Return value | Example | +| ------------- | ------------------ | -------------- | ----------------------- | +| String | 0: number or string | String of input value. | `String(40) == '40'` | +| Number | 0: number or string | Number of input value. | `Number('50') == 50`
                                                        `Number('hello')` is invalid | +| Boolean | 0: number or string | Boolean of input value. | `Boolean('123') == true`
                                                        `Boolean('') == false`
                                                        `Boolean(583) == true`
                                                        `Boolean(0) == false` | +| round | 0: number | Rounded number. | `round(1.3) == 1`
                                                        `round(1.95) == 2` | +| floor | 0: number | Number rounded to floor. | `floor(1.3) == 1`
                                                        `floor(1.95) == 1` | +| upper | 0: string | String in upper case. | `upper('hello') == 'HELLO'` | +| lower | 0: string | String in lower case. | `lower('BYE BYE') == 'bye bye'` | +| trim | 0: string | Trimmed string. | `trim(" abc ") == "abc"` | +| trimLeft | 0: string | Left-trimmed string. | `trimLeft(" abc ") == "abc "`| +| trimRight | 0: string | Right-trimmed string. | `trimRight(" abc ") == " abc"` | +| replace | 0: string - main string
                                                        1: string - substring to find
                                                        2: string - substring to replace | Replace all instances of the sub-string (1) in the main string (0) with the sub-string (2). | `replace('hello there', 'e', 'a') == 'hallo thara'`| +| substring | 0: string - main string
                                                        1: string - index to start
                                                        2: string - index to end | Returns a sub-string of a string. | `substring("hello world", 6, 11) == "world"` | +| length | string | Length of a string. | `length("gump") == 4` | +| includes | 0: string - main string
                                                        1: string - string to search for | Whether a search string is located within the main string. | `includes("codefresh", "odef") == true` | +| indexOf | 0: string - main string
                                                        1: string - string to search for | Index of a search string if it is found inside the main string | `indexOf("codefresh", "odef") == 1` | +| match | 0: string - main string
                                                        1: string - regular expression string, [JS style](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) (Note: in JS strings, the backslash `\` is an escape character so in order to use a literal backslash, you need to escape it. For example: `"^\\d+$"` instead of `"^\d+$"`)
                                                        2: boolean - ignore case | Search for a regular expression inside a string, ignoring or not ignoring case | `match("hello there you", "..ll.", false) == true`
                                                        `match("hello there you", "..LL.", false) == false`
                                                        `match("hello there you", "hell$", true) == false`
                                                        `match("hello there you", "^hell", true) == true`
                                                        `match("hello there you", "bye", false) == false` | +| Variable | string | Search for the value of a variable | `Variable('some-clone')` | +| Member | 0: string - variable name
                                                        1: string - member name | Search for the value of a variable member | `Member('some-clone', 'working-directory')` | + +## What to read next + +* [Conditional Execution of Steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) +* [Condition Expression Syntax]({{site.baseurl}}/docs/codefresh-yaml/condition-expression-syntax/) +* [Working Directories]({{site.baseurl}}/docs/codefresh-yaml/working-directories/) +* [Annotations]({{site.baseurl}}/docs/codefresh-yaml/annotations/) +* [Pipeline/Step hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/) diff --git a/_docs/pipelines/conditional-execution-of-steps.md b/_docs/pipelines/conditional-execution-of-steps.md new file mode 100644 index 00000000..6f60990b --- /dev/null +++ b/_docs/pipelines/conditional-execution-of-steps.md @@ -0,0 +1,247 @@ +--- +title: "Conditional execution of steps" +description: "Skip specific pipeline steps according to one or more conditions" +group: pipelines +redirect_from: + - /docs/conditional-execution-of-steps/ +toc: true +--- +For each step in a `codefresh.yml` file, you can define a set of conditions which need to be satisfied in order to execute the step. (An introduction to the `codefresh.yml` file can be found [here]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/).) + +There are currently two main methods to define conditions: +* Branch conditions +* Expression conditions + +## Branch Conditions + +Usually, you'll want to define a branch condition, be it of the type ```ignore``` for blacklisting a set of branches or of the type ```only``` for allowlisting a set of branches. Each branch specification can either be an exact branch name, e.g. ```master```, or a regular expression, e.g. ```/hotfix$/```. Case insensitive regexps (```/^FB-/i```) are also supported. + +Here are some examples: + +Only execute for the ```master``` branch: + + `only-master-branch.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + branch: + only: + - master +{% endhighlight %} + +Only execute for branches whose name begins with ```FB-``` prefix (feature branches): + + `only-feature-branches.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + branch: + only: + - /^FB-.*/i +{% endhighlight %} + +Ignore the develop branch and master branch: + + `ignore-master-and-develop-branch.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + branch: + ignore: + - master + - develop +{% endhighlight %} + + +>We use [JavaScript regular expressions](https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions) for the syntax in branch conditions. + + +## Condition expressions + +Alternatively, you can use more advanced condition expressions. + +This follows the standard [condition expression syntax](#condition-expression-syntax). In this case, you can choose to execute if ```all``` expression conditions evaluate to ```true```, or to execute if ```any``` expression conditions evaluate to ```true```. + +> Note: Use "" around variables with text to avoid errors in processing the conditions. Example: "${{CF_COMMIT_MESSAGE}}" + +Here are some examples. 
Execute if the string ```[skip ci]``` is not part of the main repository commit message AND if the branch is ```master``` + + `all-conditions.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + condition: + all: + noSkipCiInCommitMessage: 'includes(lower({% raw %}"${{CF_COMMIT_MESSAGE}}"{% endraw %}), "skip ci") == false' + masterBranch: '{% raw %}"${{CF_BRANCH}}{% endraw %}" == "master"' +{% endhighlight %} + +Execute if the string ```[skip ci]``` is not part of the main repository commit message, OR if the branch is not a feature branch (i.e. name starts with FB-) + + `any-condition.yml` +{% highlight yaml %} +build-step: + description: Building the image. + type: build + dockerfile: Dockerfile + image-name: someRepo/someUser + when: + condition: + any: + noSkipCiInCommitMessage: 'includes(lower({% raw %}"${{CF_COMMIT_MESSAGE}}"{% endraw %}), "skip ci") == false' + notFeatureBranch: 'match({% raw %}"${{CF_BRANCH}}"{% endraw %}, "^FB-", true) == false' +{% endhighlight %} + +Each step in `codefresh.yml` file can contain conditions expressions that must be satisfied for the step to execute. + +This is a small example of where a condition expression can be used: + `YAML` +{% highlight yaml %} +step-name: + description: Step description + image: image/id + commands: + - bash-command1 + - bash-command2 + when: + condition: + all: + executeForMasterBranch: "{% raw %}'${{CF_BRANCH}}{% endraw %}' == 'master'" +{% endhighlight %} + +### Condition expression syntax +A condition expression is a basic expression that is evaluated to true/false (to decide whether to execute or not to execute), and can have the following syntax: + +#### Types + +{: .table .table-bordered .table-hover} +| Type | True/False Examples | True/False | +| ------- | ----------------------------------------- | --------------| +| String | True: "hello"
                                                        False: "" | {::nomarkdown}
                                                        • String with content = true
                                                        • Empty string = false
                                                        String comparison is lexicographic.{:/} | +| Number | True: 5
                                                        True: 3.4
                                                        True: 1.79E+308 | {::nomarkdown}
                                                        • Any number other than 0 = true.
                                                        • 0 = false
                                                        {:/} | +| Boolean | True: true
                                                        False: false | {::nomarkdown}
                                                        • True = true
                                                        • False = false
                                                        {:/} | +| Null | False: null | Always false | + +#### Variables + +You can use the User Provided variables as explained in [Variables]({{site.baseurl}}/docs/pipelines/variables/), including the [variables +exposed by each individual pipeline step]({{site.baseurl}}/docs/pipelines/variables/#step-variables). + +#### Unary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ---------- | --------------------- | +| `-` | Negation of numbers | +| `!` | Logical NOT | + +#### Binary Operators + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| --------------------------- | ----------- | +| Add, String Concatenation | `+` | +| Subtract | `-` | +| Multiply | `*` | +| Divide | `/` | +| Modulus | `%` | +| Logical AND | `&&` | +| Logical OR | `||` | + +#### Comparisons + +{: .table .table-bordered .table-hover} +| Operator | Operation | +| ----------- | ---------------------- | +| `==` | Equal to | +| `!=` | Not equal to | +| `>` | Greater than | +| `>=` | Greater than or equal | +| `<` | Less than | +| `<=` | Less than or equal | + +#### Functions + +{: .table .table-bordered .table-hover} +| Function Name | Parameters | Return value | Example | +| ------------- | ------------------ | -------------- | ----------------------- | +| String | 0: number or string | String of input value. | `String(40) == '40'` | +| Number | 0: number or string | Number of input value. | `Number('50') == 50`
                                                        `Number('hello')` is invalid | +| Boolean | 0: number or string | Boolean of input value. | `Boolean('123') == true`
                                                        `Boolean('') == false`
                                                        `Boolean(583) == true`
                                                        `Boolean(0) == false` | +| round | 0: number | Rounded number. | `round(1.3) == 1`
                                                        `round(1.95) == 2` | +| floor | 0: number | Number rounded to floor. | `floor(1.3) == 1`
                                                        `floor(1.95) == 1` | +| upper | 0: string | String in upper case. | `upper('hello') == 'HELLO'` | +| lower | 0: string | String in lower case. | `lower('BYE BYE') == 'bye bye'` | +| trim | 0: string | Trimmed string. | `trim(" abc ") == "abc"` | +| trimLeft | 0: string | Left-trimmed string. | `trimLeft(" abc ") == "abc "`| +| trimRight | 0: string | Right-trimmed string. | `trimRight(" abc ") == " abc"` | +| replace | 0: string - main string
                                                        1: string - substring to find
                                                        2: string - substring to replace | Replace all instances of the sub-string (1) in the main string (0) with the sub-string (2). | `replace('hello there', 'e', 'a') == 'hallo thara'`| +| substring | 0: string - main string
                                                        1: string - index to start
                                                        2: string - index to end | Returns a sub-string of a string. | `substring("hello world", 6, 11) == "world"` | +| length | string | Length of a string. | `length("gump") == 4` | +| includes | 0: string - main string
                                                        1: string - string to search for | Whether a search string is located within the main string. | `includes("codefresh", "odef") == true` | +| indexOf | 0: string - main string
                                                        1: string - string to search for | Index of a search string if it is found inside the main string | `indexOf("codefresh", "odef") == 1` | +| match | 0: string - main string
                                                        1: string - regular expression string, [JS style](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) (Note: in JS strings, the backslash `\` is an escape character so in order to use a literal backslash, you need to escape it. For example: `"^\\d+$"` instead of `"^\d+$"`)
                                                        2: boolean - ignore case | Search for a regular expression inside a string, ignoring or not ignoring case | `match("hello there you", "..ll.", false) == true`
                                                        `match("hello there you", "..LL.", false) == false`
                                                        `match("hello there you", "hell$", true) == false`
                                                        `match("hello there you", "^hell", true) == true`
                                                        `match("hello there you", "bye", false) == false` | +| Variable | string | Search for the value of a variable | `Variable('some-clone')` | +| Member | 0: string - variable name
                                                        1: string - member name | Search for the value of a variable member | `Member('some-clone', 'working-directory')` | + +## Execute steps according to the presence of a variable + +If a variable does not exist in a Codefresh pipeline, then it will simply stay as a string inside the definition. When the `{% raw %}${{MY_VAR}}{% endraw %}` variable is not available, the engine will literally print `{% raw %}${{MY_VAR}}{% endraw %}`, because that variable doesn't exist. + +You can use this mechanism to decide which steps will be executed if a [variable]({{site.baseurl}}/docs/pipelines/variables/) exists or not. + + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Running if variable exists" + type: "freestyle" + image: "alpine:3.9" + commands: + - echo "Step 1 is running" + when: + condition: + all: + whenVarExists: 'includes("${{MY_VAR}}", "{{MY_VAR}}") == false' + step2: + title: "Running if variable does not exist" + type: "freestyle" + image: "alpine:3.9" + commands: + - echo "Step 2 is running" + when: + condition: + all: + whenVarIsMissing: 'includes("${{MY_VAR}}", "{{MY_VAR}}") == true' +{% endraw %} +{% endhighlight %} + +Try running the pipeline above and see how it behaves when a variable called `MY_VAR` exists (or doesn't exist). + +>Notice that if you use this pattern a lot it means that you are trying to create a complex pipeline that is very smart. We suggest you create instead multiple [simple pipelines for the same project]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/#trunk-based-development). + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Variables]({{site.baseurl}}/docs/pipelines/variables/) +[Pull Requests and Branches]({{site.baseurl}}/docs/ci-cd-guides/pull-request-branches/) +[Pipeline/Step hooks]({{site.baseurl}}/docs/pipelines/hooks/) diff --git a/_docs/pipelines/configuration/build-status.md b/_docs/pipelines/configuration/build-status.md new file mode 100644 index 00000000..02d9e897 --- /dev/null +++ b/_docs/pipelines/configuration/build-status.md @@ -0,0 +1,150 @@ +--- +title: "Public logs and status badges" +description: "Embedding Status Images and viewing public logs" +group: pipelines +toc: true +redirect_from: + - /docs/build-status + - /docs/build-status/ + - /docs/build-badges-1 + - /docs/build-badges-1/ +--- + + +Badges are simple images that show you the last build status. They support both the pipeline and branch service status. +The badges can be embedded into your repository’s `readme.md` file or any other website. + +Here is an example: + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/badge.png" +url="/images/pipeline/badges/badge.png" +alt="Build badge example" +caption="Build badge example" +max-width="80%" +%} + +Clicking the badge takes you into the build view of the pipeline. + +## Finding the build badge of your project + +In the pipeline view of a project, select the *Settings* tab and then click *General*. Next to the *badges* section you will find a link to the build badge. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/get-build-badge.png" +url="/images/pipeline/badges/get-build-badge.png" +alt="Build badge setup" +caption="Build badge setup" +max-width="80%" +%} + +Click on it and you will get a new dialog where you can select + + * The graphical style of the badge (two styles are offered) + * The syntax for the badge + +{% include + image.html + lightbox="true" + file="/images/a0c4aed-codefresh_badges_2.png" + url="/images/a0c4aed-codefresh_badges_2.png" + alt="Codefresh badges syntax" + caption="Codefresh badges syntax" + max-width="70%" + %} + + The following embedding options are available: + + * Markdown for usage in text files (e.g. `README.MD`) + * Plain HTML for normal websites + * AsciiDoc for documentation pages + * Image for any other document type + + +Copy the snippet in your clipboard. + +## Using the build badge + +Paste the snippet in the file/document where you want the badge to be visible (e.g. in a Readme file in GitHub). + +For example, the markdown syntax is + +``` +[![Codefresh build status]( BADGE_LINK )]( URL_TO_PIPELINE ) +``` + +You can also manually change the parameters of the link by using +`https://g.codefresh.io/api/badges/build?*param1*=xxx&*param2*=yyy`\\ +when *param1*, *param2*, etc... are the parameters from the table below. + +{: .table .table-bordered .table-hover} +| Query parameter | Description | +| -----------------------|--------------------------------------------------------- | +| **branch** - optional | Name of the branch
                                                        If not supplied, default is master | +| **repoName** | Name of the repository | +| **pipelineName** | Name of the pipeline | +| **accountName** | Name of the account | +| **repoOwner** | The name of the repository owner | +| **key** - optional | Token related to the account | +| **type** - optional | Badge types
                                                        cf-1: ![Codefresh build status]( http://g.codefresh.io/api/badges/build/template/urls/cf-1) - also the default badge.
                                                        cf-2: ![Codefresh build status]( http://g.codefresh.io/api/badges/build/template/urls/cf-2) | + +Everybody who looks at your readme file will also see the current build status of the associated Codefresh pipeline. + +## Public build logs + +By default, even though the badge shows the build status for everybody, clicking the badge allows only Codefresh registered users that also have access to the pipeline to view the actual builds. + +If you are working on an open-source project and wish for greater visibility, you can enable public logs (and associated badge) for your project so that any user can see the pipeline results (even if they are not logged into Codefresh). + +Public logs are disabled by default and you need to explicitly enable them. + +>This happens for security reasons. Make sure that the logs you are exposing to the Internet do not have any sensitive information. If you are unsure, you can still use the private badge that shows project status only as explained in the previous section. + +To enable the public logs, toggle the respective switch in the pipeline settings: + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/toggle-public-logs.png" +url="/images/pipeline/badges/toggle-public-logs.png" +alt="Enabling public logs" +caption="Enabling public logs" +max-width="80%" +%} + +Then click the *Save* button to apply changes for your pipeline. Once that is done you will also get a second badge (public) as well as the public URL to your project. + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/get-public-url.png" +url="/images/pipeline/badges/get-public-url.png" +alt="Getting the public URL log view" +caption="Getting the public URL log view" +max-width="70%" +%} + +Now you can use this badge and/or public URL anywhere and all users can view your logs without being logged into Codefresh at all (or having access to your pipeline). + +{% include +image.html +lightbox="true" +file="/images/pipeline/badges/view-public-logs.png" +url="/images/pipeline/badges/view-public-logs.png" +alt="Public logs" +caption="Public logs" +max-width="90%" +%} + +Your visitors can also click on each individual pipeline step and see the logs for that step only. + +If you are using Codefresh to manage a public project, you should also use the capability to [trigger builds from external forks]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#support-for-building-pull-requests-from-forks). + +## Related articles +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Monitoring pipelines]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/) diff --git a/_docs/pipelines/configuration/pipeline-settings.md b/_docs/pipelines/configuration/pipeline-settings.md new file mode 100644 index 00000000..bfecdb18 --- /dev/null +++ b/_docs/pipelines/configuration/pipeline-settings.md @@ -0,0 +1,86 @@ +--- +title: "Global settings for CI pipelines" +description: "Define global options for pipeline templates, yaml sources and approval behavior" +group: pipelines +toc: true +--- + +To access your global pipeline settings navigate to [https://g.codefresh.io/account-admin/account-conf/pipeline-settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings) or click on *Account settings* on the left sidebar and then choose *Pipeline settings* item on the next screen. 
+ +On this page, you can define global parameters for the whole Codefresh account regarding pipeline options. Users can still override some of these options for individual pipelines. + +{% include image.html +lightbox="true" +file="/images/pipeline/pipeline-settings/pipeline-settings-ui.png" +url="/images/pipeline/pipeline-settings/pipeline-settings-ui.png" +alt="Pipeline settings" +caption="Pipeline settings" +max-width="80%" +%} + + +## Pause pipeline executions + +Pause builds for pipelines at the account level, for example, during maintenance. + +* **Pause build execution** is disabled by default. +* When enabled: + * New pipelines in the account are paused immediately. + * Existing pipelines with running builds are paused only after the builds have completed execution. +* Paused pipelines are set to status Pending, and remain in this status until **Pause build execution** is manually disabled for the account. + +{% include image.html +lightbox="true" +file="/images/pipeline/pipeline-settings/pause-pipeline-enabled.png" +url="/images/pipeline/pipeline-settings/pause-pipeline-enabled.png" +alt="Pause Build Execution pipeline setting enabled" +caption="Pause Build Execution pipeline setting enabled" +max-width="80%" +%} + +## Template section + +Here you can define global template behavior. The options are: + +* Enable [pipeline templates]({{site.baseurl}}/docs/docs/pipelines/pipelines/#using-pipeline-templates) for users. If this is enabled some pipelines can be marked as templates and users can still select them when creating a new pipeline. +* Decide if users can clone an existing pipeline (along with its triggers and associated parameters) when [creating a new pipeline]({{site.baseurl}}/docs/docs/pipelines/pipelines/#creating-new-pipelines). + +Note that templates are simply normal pipelines “marked” as a template. There is no technical difference between templates and actual pipelines. + +## Pipeline YAML section + +Here you can restrict the sources of pipeline YAML that users can select. The options are: + +* Enable/Disable the [inline editor]({{site.baseurl}}/docs/docs/pipelines/pipelines/#using-the-inline-pipeline-editor) where YAML is stored in Codefresh SaaS +* Enable/disable pipeline YAML from connected Git repositories +* Enable/disable pipeline YAML from [external URLs]({{site.baseurl}}/docs/docs/pipelines/pipelines/#loading-codefreshyml-from-version-control) + +You need to allow at least one of these options so that users can create new pipelines. We suggest leaving the first option enabled when users are still learning about Codefresh and want to experiment. + +## Advanced pipeline options + +Here you can set the defaults for advanced pipeline behavior. The options are: + +* [Keep or discard]({{site.baseurl}}/docs/pipelines/steps/approval/#keeping-the-shared-volume-after-an-approval) the volume when a pipeline is entering approval state +* Whether pipelines in approval state [count or not against concurrency]({{site.baseurl}}/docs/pipelines/steps/approval/#define-concurrency-limits) +* Define the [Service Account]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/#setting-up-ecr-integration---service-account) for Amazon ECR integration. +* Set the default registry where all Public Marketplace Step images are pulled from. Registries listed are from the [Docker Registry]({{site.baseurl}}/docs/integrations/docker-registries/) integration page. + * Example: Public Marketplace Step image is defined to use Docker Hub. 
If you select a quay.io integration, all Public Marketplace Step images will be pulled from quay.io instead of Docker Hub. + * Note: This does not affect Freestyle Steps. + +Note that the first option affects pipeline resources and/or billing in the case of SaaS pricing. It will also affect users of existing pipelines that depend on this behavior. It is best to enable/disable this option only once at the beginning. + +## Default Behavior for Build Step + +Here you can decide if the build step will push images or not according to your organization’s needs. The options are: + +1. Users need to decide if an image will be pushed or not after it is built +2. All built images are automatically pushed to the default registry +3. All built images are NOT pushed anywhere by default + +Note that this behavior is simply a convenience feature for legacy pipelines. Users can still use a [push step]({{site.baseurl}}/docs/pipelines/steps/push/) in a pipeline and always push an image to a registry regardless of what was chosen in the build step. + +## Related articles +[Creating Pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Git Integration]({{site.baseurl}}/docs/integrations/git-providers/) diff --git a/_docs/pipelines/configuration/secrets-store.md b/_docs/pipelines/configuration/secrets-store.md new file mode 100644 index 00000000..d447ec88 --- /dev/null +++ b/_docs/pipelines/configuration/secrets-store.md @@ -0,0 +1,96 @@ +--- +title: "Secrets in CI pipelines" +description: "Use Kubernetes secrets in Codefresh" +group: pipelines +toc: true +--- + +Once you have [connected Codefresh to your secrets storage]({{site.baseurl}}/docs/integrations/secret-storage/), you can use them in any pipeline or UI screen. + +> Note: This feature is for Enterprise accounts only. + +## Using secrets in pipelines + +The syntax for using the secret is {% raw %}`${{secrets.NAME_IN_CODEFRESH.KEY}}`{% endraw %}. + +> If you did not include the resource-name as a part of your secret store context creation, the syntax for using your secret differs slightly: + {% raw %}${{secrets.NAME_IN_CODEFRESH.RESOURCE-NAME@KEY}}{% endraw %} + The previous KEY portion is now made of two parts separated using @, where the left side is the name of the resource in the namespace, and the right side the key in that resource. + +To use the secret in your pipeline, you have two options: + +* Define it as a pipeline variable: + +{% include +image.html +lightbox="true" +file="/images/pipeline/secrets/secrets-pipeline-var.png" +url="/images/pipeline/secrets/secrets-pipeline-var.png" +alt="Secrets Pipeline Variable" +caption="Secrets stored in Pipeline Variable" +max-width="80%" +%} + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + step: + type: freestyle + arguments: + image: alpine + commands: + - echo $SECRET +{% endraw %} +{% endhighlight %} + +* Use the secret directly in your YAML + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + step: + type: freestyle + arguments: + image: alpine + environment: + - SECRET=${{secrets.test.key1}} + commands: + - echo $SECRET +{% endraw %} +{% endhighlight %} + + +## Using secrets in the Codefresh UI + +You can also use secrets in the GUI screens that support them. 
Currently you can use secrets in: + +* Values in [shared configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) +* Integration with [cloud storage]({{site.baseurl}}/docs/testing/test-reports/#connecting-your-storage-account) + +Where secret integration is supported, click on the lock icon and enable the toggle button. You will get a list of your connected secrets: + + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +url="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +alt="Using external secrets in shared configuration values" +caption="Using external secrets in shared configuration values" +max-width="50%" +%} + +If you have already specified the resource field during secret definition the just enter on the text field the name of the secret directly, i.e. `my-secret-key`. +If you didn't include a resource name during secret creation then enter the full name in the field like `my-secret-resource@my-secret-key`. + + +## Related articles +[Shared Configuration]({{site.baseurl}}/docs/pipelines/shared-configuration/) +[Git triggers]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) +[Running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/) +[Debugging Pipelines]({{site.baseurl}}/docs//yaml-examples/examples/trigger-a-k8s-deployment-from-docker-registry/) + diff --git a/_docs/pipelines/configuration/shared-configuration.md b/_docs/pipelines/configuration/shared-configuration.md new file mode 100644 index 00000000..0ce4f506 --- /dev/null +++ b/_docs/pipelines/configuration/shared-configuration.md @@ -0,0 +1,264 @@ +--- +title: "Shared configuration for CI pipelines" +description: "How to keep your pipelines DRY" +group: pipelines +toc: true +--- + +After creating several pipelines in Codefresh, you will start to notice several common values between them. Common examples are access tokens, environment URLs, configuration properties etc. + +Codefresh allows you to create those shared values in a central place and then reuse them in your pipelines +avoiding the use of copy-paste. + +You can share: + +* Environment parameters (easy) +* Helm values (easy) +* Any kind of YAML data (advanced) + + +## Creating shared configuration + +From the left sidebar click *Account settings* to enter your global settings. Then choose *Shared Configuration* from the left menu. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-configuration.png" +url="/images/pipeline/shared-configuration/shared-configuration.png" +alt="Creating shared configuration snippets" +caption="Creating shared configuration snippets" +max-width="50%" +%} + +You can create four types of shared configuration: + +* **Shared Configuration**: for environment variables +* **Shared Secret**: for encrypted environment variables of sensitive data (access tokens, etc.) +* **YAML**: for Helm values or any other generic information +* **Secret YAML**: for above, but encrypts the contents + +>RBAC is supported for all types of shared configurations. + +You can create as many shared snippets as you want (with unique names). + +### Using external secrets as values + +Note that the default "shared secrets" and "secret yaml" entities use the built-in secret storage of Codefresh. 
You can also +use any [external secrets that you have defined]({{site.baseurl}}/docs/integrations/secret-storage/) (such as Kubernetes secrets), by using the normal entities and then clicking on the lock icon that appears. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +url="/images/pipeline/shared-configuration/shared-conf-secret-integration.png" +alt="Using external secrets in shared configuration values" +caption="Using external secrets in shared configuration values" +max-width="50%" +%} + +If you have already specified the resource field during secret definition the just enter on the text field the name of the secret directly, i.e. `my-secret-key`. +If you didn't include a resource name during secret creation then enter the full name in the field like `my-secret-resource@my-secret-key`. + +### Level of access + +For each set of values you can toggle the level of access by [non-admin users]({{site.baseurl}}/docs/administration/access-control/#users-and-administrators). If it is off, users will **not** be able to use the [CLI](https://codefresh-io.github.io/cli/) or [API]({{site.baseurl}}/docs/integrations/codefresh-api/) +to access these [values](https://codefresh-io.github.io/cli/contexts/). If it is on, all users from all your Codefresh teams will be able to access this set of values +with CLI commands or API calls. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-config-access.png" +url="/images/pipeline/shared-configuration/shared-config-access.png" +alt="Allow access to non-admin users" +caption="Allow access to non-admin users" +max-width="60%" +%} + +We recommend that you disable access for all values of type *shared secret* and *secret YAML* unless your organization has different needs. + + +## Using shared environment variables + +Each pipeline has a set of environment variables that can be defined in the *Workflow* screen. +To import a shared configuration open the pipeline editor, and from the tabs on the right side select *VARIABLES*. Then click the gear icon to *Open Advanced Options*: + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/environment-variables.png" +url="/images/pipeline/shared-configuration/environment-variables.png" +alt="Pipeline environment variables" +caption="Pipeline environment variables" +max-width="50%" +%} + +To use your shared configuration, click the *Import from shared configuration* button and select the snippet from the list: + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/import-variables.png" +url="/images/pipeline/shared-configuration/import-variables.png" +alt="Importing shared configuration" +caption="Importing shared configuration" +max-width="50%" +%} + +Once you click *Add* the values from the shared configuration will be appended to the ones +you have in your pipelines. In case of similar values the shared configuration will follow the [precedence rules]({{site.baseurl}}/docs/pipelines/variables/#user-provided-variables). + + +## Using shared Helm values + +To use a shared YAML snippet for Helm values you can install a new Helm chart either from: + +* The [Helm chart list]({{site.baseurl}}/docs/new-helm/add-helm-repository/#install-chart-from-your-helm-repository) +* The [Helm environment board]({{site.baseurl}}/docs/new-helm/helm-environment-promotion/#moving-releases-between-environments). 
+ +In both cases, when you see the Helm installation dialog you can import any of your YAML snippets +to override the default chart values. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/helm-import.png" +url="/images/pipeline/shared-configuration/helm-import.png" +alt="Importing Helm values" +caption="Importing Helm values" +max-width="50%" +%} + +From the same dialog you can also create a brand-new shared configuration snippet of type YAML. +Not only it will be used for this Helm chart, but it will be added in your global shared configuration as well. + +## Using values from the Shared Configuration in your Helm step + +Additionally, you can define shared variables in your account settings and reuse those across your Helm steps, and specifically, in your [custom Helm values]({{site.baseurl}}/docs/docs/new-helm/using-helm-in-codefresh-pipeline/#helm-values). + +Under *Account Setting* > *Shared Configuration*, add the variable to your shared configuration. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/helm-shared-variables.png" +url="/images/pipeline/shared-configuration/helm-version-shared.png" +alt="Adding shared configuration variables" +caption="Adding shared configuration variables" +max-width="50%" +%} + +Go to the workflow of the Codefresh pipeline to which you want to add the variable. Then select *variables* from the right sidebar. *Open advanced configuration* and select *Import from shared configuration*. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/environment-variables.png" +url="/images/pipeline/shared-configuration/environment-variables.png" +alt="Pipeline environment variables" +caption="Pipeline environment variables" +max-width="50%" +%} + +This will allow you to add shared variables. + +{% include +image.html +lightbox="true" +file="/images/pipeline/shared-configuration/shared-helm-variables.png" +url="/images/pipeline/shared-configuration/shared-helm-variables.png" +alt="Shared helm variable" +caption="Shared helm variable" +max-width="50%" +%} + +Add the shared variables to your Helm step: + +{% highlight shell %} +{% raw %} +deploy: + type: "helm" + working_directory: "./react-article-display" + stage: "deploy" + arguments: + action: "install" + chart_name: "charts/example-chart" + release_name: "test-chart" + helm_version: "${{HELM_VERSION}}" + kube_context: "anais-cluster@codefresh-sa" + custom_values: + - 'pullPolicy=${{PULL_POLICY}}' +{% endraw %} +{% endhighlight %} + +The shared variables can now be used across your pipelines. + +## Sharing any kind of YAML data in pipelines + +All the snippets from shared configuration are also available as context in the [Codefresh CLI](https://codefresh-io.github.io/cli/contexts/) + +This means that you can manipulate them programmatically and read their values in the pipeline in any way you see fit. 
+ +If for example you have a shared configuration named `my-global-config` you can easily read its contents programmatically using the CLI: + +{% highlight shell %} +$codefresh get context my-global-config --output=yaml + +apiVersion: v1 +kind: context +metadata: + default: false + system: false + name: my-global-config +type: config +spec: + type: config + data: + foo: bar +{% endhighlight %} + +### Example - custom value manipulation + +Let's say that you have a YAML segment with the following contents: + +{% highlight yaml %} +favorite: + drink: coffee + food: pizza +{% endhighlight %} + +Here is a pipeline step that is reading the yaml snippet and extracts a value + + `YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyFavoriteFoodStep: + title: Favorite food + image: codefresh/cli + commands: + - echo I love eating $(codefresh get context my-food-values --output=json | jq -r '.spec.data.favorite.food') +{% endraw %} +{% endhighlight %} + +Once the pipeline runs, you will see in the logs: + +``` +I love eating pizza +``` + +## Manipulating shared configuration programmatically + +You can also create/update/delete shared configuration via the [Codefresh CLI](https://codefresh-io.github.io/cli/) or [API]({{site.baseurl}}/docs/integrations/codefresh-api/). + +See the [context section](https://codefresh-io.github.io/cli/contexts/create-context/) in the CLI documentation. + + + +## Related articles +[Variables]({{site.baseurl}}/docs/pipelines/variables/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) + diff --git a/_docs/pipelines/debugging-pipelines.md b/_docs/pipelines/debugging-pipelines.md new file mode 100644 index 00000000..a591378a --- /dev/null +++ b/_docs/pipelines/debugging-pipelines.md @@ -0,0 +1,250 @@ +--- +title: "Debugging CI pipelines" +description: "Pause and inspect pipelines" +group: pipelines +toc: true +--- + +In addition to [running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/), Codefresh also allows you to debug pipelines by stopping their execution and inspecting manually their state (files, environment variables, tools etc.) + + +The Codefresh pipeline debugger works similar to your IDE debugger. You can place breakpoints on one or more pipeline steps and once the pipeline hits one of them, it will stop. You will then get a terminal like interface inside your pipeline step where you can run any commands that you wish in order to understand the state of the container. + + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/debug-session.png" + url="/images/pipeline/debug/debug-session.png" + alt="A debugging session" + caption="A debugging session" + max-width="70%" +%} + +There are several options for defining exactly when a step will stop. + +## Entering the debugger mode + +There are threes ways to enter the debugging mode in a pipeline. You can activate the debugging button when your run the pipeline: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/run-pipeline-debug.png" + url="/images/pipeline/debug/run-pipeline-debug.png" + alt="Running a pipeline in debug mode" + caption="Running a pipeline in debug mode" + max-width="30%" +%} + +Alternatively if a pipeline is already running normally, you can enter debugging mode by clicking on the bug icon on the top right. 
+ +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/enter-debug-mode.png" + url="/images/pipeline/debug/enter-debug-mode.png" + alt="Switching to debug mode" + caption="Switching to debug mode" + max-width="60%" +%} + +You can restart a pipeline that has already finished in debug mode: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/restart-in-debug.png" + url="/images/pipeline/debug/restart-in-debug.png" + alt="Restart in debug mode" + caption="Restart in debug mode" + max-width="70%" +%} + +Now you are ready to place breakpoints in steps. + + +## Placing breakpoints + +Once the debugging mode is active, all pipeline steps will get an extra breakpoint icon on the far right of their box. + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/breakpoint.png" + url="/images/pipeline/debug/breakpoint.png" + alt="A step breakpoint" + caption="A step breakpoint" + max-width="70%" +%} + + +You can click on this icon and define a breakpoint for this particular step. You have the following options + +* *Before* - place a breakpoint before the step is initialized +* *Override* - place a breakpoint after the step has initialized but before its execution ([freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/)) +* *After* - place a breaking point after the step has finished execution. + +You can choose multiple debugging phases. In most cases the `Override` option is the most useful one. The `before` phase allows you to inspect +a pipeline step even before [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) are up. + +The `after` phase is useful if you want to verify files or variables after a step has finished its execution but before the next step starts. + + +## Using the debugger terminal + +Once the pipeline reaches a step that has a breakpoint, execution will pause and a new debugger terminal will become available: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/debug-window.png" + url="/images/pipeline/debug/debug-window.png" + alt="The debugging terminal" + caption="The debugging terminal" + max-width="60%" +%} + +You can now manually type commands to inspect your container. If your Codefresh plan has the basic debugging capabilities you can run the following commands: + +* `cd, ls` to see files +* `printenv` to see environment variables +* `cat` to read files +* `top` to see what is running +* `export` and [cf_export]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) to create environment variables +* `exit` to finish the debugging session + +If you have placed a breakpoint in the `override` phase of a freestyle step then the container image is the same as the one defined in the step. Therefore you can execute all tools that you have placed in the image (e.g. compilers, linters, test frameworks etc.) + +In all cases the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) is automounted so you can examine your source code or any other intermediate artifacts placed in your project folder or the pipeline cache. + +If the breakpoint is on a `before` or `after` phase, the command line terminal is powered by an [alpine](https://alpinelinux.org/) image. The image has already useful tools such as `wget`, `nc` and `vi`. 
If you have the advanced debugging capabilities in your Codefresh plan you can then install additional tools on your own directly in the terminal with [apk](https://wiki.alpinelinux.org/wiki/Alpine_Linux_package_management). Examples: + +* `apk add curl` +* `apk add nano` +* `apk add go` +* `apk add python` + +Use the command `apk search foo` to search for a package named foo. + + +## Resuming execution + +Once you are happy with your debugging session, click the continue button to resume. + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/resume-button.png" + url="/images/pipeline/debug/resume-button.png" + alt="Continue execution button" + caption="Continue execution button" + max-width="60%" +%} + +The pipeline will continue and then stop for the next breakpoint (if any). You can still revisit the debugger window for previous steps to see what debugging commands you had executed. + +>Notice that to conserve resources, there is a 15 minute limit on each open debug session. If you don't resume the pipeline within 15 minutes after hitting a breakpoint the whole pipeline will stop with a timeout error. + +It is important to understand that if you have chosen the `override` phase in a freestyle step, then the commands mentioned in the pipeline definition are completely ignored. + +## Using the alternative debug window + +If you enable the debugger on a freestyle step with the "override" option, Codefresh will install some extra tooling on the Docker image that is needed for the debugger itself. + +By default, the internal debugger tooling is using node.js, so if your image is already based on Node.js, you might get version conflicts in your application. + +You can enable an alternative debugger by passing the variable `DEBUGGER_RUNNER = 2` on the whole pipeline: + +{% + include image.html + lightbox="true" + file="/images/pipeline/debug/alternative-debugger.png" + url="/images/pipeline/debug/alternative-debugger.png" + alt="Enabling the Python based debugger" + caption="Enabling the Python based debugger" + max-width="60%" +%} + +This debugger is based on Python instead of Node.js and it can work with both Python 2 and 3 Docker images. +This way the debugger tools will not affect your application. You can also use the same method in a specific freestyle step like this: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + hello_world_step: + title: freestyle step + image: node:11.1 + environment: + - 'DEBUGGER_RUNNER=2' +{% endraw %} +{% endhighlight %} + + + + + +## Inserting breakpoints in the pipeline definition + +It is also possible to mention breakpoints in the Codefresh YAML instead of using the UI. Breakpoints mentioned in the `codefresh.yml` file have no effect when the pipeline is not running in Debug mode. You need to run the pipeline in debug mode in order for them to stop the pipeline. + +Here is the syntax: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - build + - test +steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'codefresh-contrib/python-flask-sample-app' + revision: 'master' + git: github + stage: prepare + MyAppDockerImage: + title: Building Docker Image + type: build + stage: build + image_name: my-app-image + working_directory: ./ + tag: 'master' + dockerfile: Dockerfile + debug: + phases: + before: true + after: false + MyUnitTests: + title: Running Unit tests + stage: test + image: '${{MyAppDockerImage}}' + debug: + phases: + before: false + override: true + after: false + commands: + - python setup.py test +{% endraw %} +{% endhighlight %} + +Once you run this pipeline in debug mode, it will automatically have breakpoints in the respective steps (but you can still override/change them using the GUI). + + +## Troubleshooting + +The debugger windows needs some extra tools in a docker image in order to work (such as the `bash` shell). Codefresh automatically installs these tools on your image without any configuration. + +If you get the message *your linux distribution is not supported* please contact us so that we can examine your docker image and make sure it is compatible with the Codefresh debugger. + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/) diff --git a/_docs/pipelines/docker-image-metadata.md b/_docs/pipelines/docker-image-metadata.md new file mode 100644 index 00000000..3b6f5306 --- /dev/null +++ b/_docs/pipelines/docker-image-metadata.md @@ -0,0 +1,217 @@ +--- +title: "Docker image metadata" +description: "How to use custom metadata in your Docker images" +group: pipelines +redirect_from: + - /docs/metadata-annotations/ + - /docs/docker-registries/metadata-annotations/ +toc: true +--- +Images built by Codefresh can be annotated with customized metadata. +This article explains how to create advanced view of your images and enrich them with custom metadata which perfectly fits your flow and image management process. + +{% + include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png" + url="/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png" + alt="Codefresh Docker registry metadata" + max-width="65%" +%} + +>We have since expanded this feature and now you are able to add custom annotations to [pipelines and builds as well]({{site.baseurl}}/docs/pipelines/annotations/). Notice also that the syntax shown in this page is deprecated but still supported. For the new syntax +see [Hooks in pipelines]({{site.baseurl}}/docs/pipelines/hooks/). + +## Metadata types + +Images built by Codefresh can be annotated with an array of key-value metadata. +Metadata values may be of the following types: + +{: .table .table-bordered .table-hover} +| Annotation type | Guidelines | Example | +| --------------- | ------------------------------------------------ | -------------------------------------------------------- | +| String | Use string | 'Example note' | +| Number | use numeric value to set this kind of annotation | 9999 | +| Boolean | Use true / false value | true | +| Percentage bar | use 0-100 value ending with % | 85% | +| Link | use url | {% raw %}`${{CF_COMMIT_URL}}`{% endraw %} | + +You can also use [Expression evaluations]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/#condition-expression-syntax) to set metadata. 
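+
+For quick reference, here is a sketch of a single build step that sets one annotation of each type from the table above. The annotation names are illustrative only; the `metadata`/`set` syntax itself is explained in the next section.
+
+ `metadata-types-example`
+{% highlight yaml %}
+build_step:
+  type: build
+  ...
+  metadata:                     # Declare the metadata attribute
+    set:                        # Specify the set operation
+      - my_note: 'Example note'                                       # string
+      - my_number: 9999                                               # number
+      - is_verified: true                                             # boolean
+      - coverage: '85%'                                               # percentage bar
+      - commit_link: {% raw %}${{CF_COMMIT_URL}}{% endraw %}          # link
+      - is_main:
+          evaluate: "{% raw %}'${{CF_BRANCH}}'{% endraw %} == 'main'" # expression evaluation
+{% endhighlight %}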
+ +## Annotate your images using Codefresh YAML +You can annotate an image as part of its build process and also on post build steps. + +{:.text-secondary} +### Build step Image Metadata Annotation +You can annotate an image as part of its build process by declaring the metadata value on the [Build step]({{site.baseurl}}/docs/pipelines/steps/build/): +1. The `metadata` attribute +2. The `set` operation +3. An array of key-value metadata + + `build-metadata-annotation` +{% highlight yaml %} +build_step: + type: build + ... + metadata: # Declare the metadata attribute + set: # Specify the set operation + - qa: pending + - commit_message: {% raw %}${{CF_COMMIT_MESSAGE}}{% endraw %} + - exit_code: 0 + - is_main: + evaluate: "{% raw %}'${{CF_BRANCH}}{% endraw %}' == 'main'" +{% endhighlight %} + +{:.text-secondary} +### Adding annotations to Built images on post-build steps +Any step in the YAML workflow can annotate built images by using [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). +To annotate a built image, configure any step with: +1. The post-step operation +2. The `metadata` attribute +3. The `set` operation +4. A list of target images with the variable syntax of {% raw %}`${{build_step_name.imageId}}`{% endraw %} +5. An array of key-value metadata + + `annotating-step` +{% highlight yaml %} +build_step: + type: build + ... + +any_step: + ... + on_success: # Execute only once the step succeeded + metadata: # Declare the metadata attribute + set: # Specify the set operation + - {% raw %}${{build_step.imageId}}{% endraw %}: # Select any number of target images + - qa: pending + + on_fail: # Execute only once the step failed + metadata: # Declare the metadata attribute + set: # Specify the set operation + - {% raw %}${{build_step.imageId}}{% endraw %}: # Select any number of target images + - exit_code: 1 + + on_finish: # Execute in any case + metadata: # Declare the metadata attribute + set: # Specify the set operation + - {% raw %}${{build_step.imageId}}{% endraw %}: # Select any number of target images + - is_main: + evaluate: "{% raw %}'${{CF_BRANCH}}'{% endraw %} == 'main'" +{% endhighlight %} + +### Example - Quality Image Metadata Annotation +You can set a quality indicator to images to show if they passed or failed tests. An image with the boolean annotation `CF_QUALITY` set to true will have a quality indicator in the 'Images' view. + + `YAML` +{% highlight yaml %} +version: '1.0' +steps: + build_step: + type: build + image_name: myrepo/imagename + working_directory: ./ + dockerfile: Dockerfile + + unit_test: + image: {% raw %}'${{build_step}}'{% endraw %} + working_directory: IMAGE_WORK_DIR + commands: + - echo test + on_success: + metadata: + set: + - {% raw %}'${{build_step.imageId}}'{% endraw %}: + - CF_QUALITY: true + on_fail: + metadata: + set: + - {% raw %}'${{build_step.imageId}}'{% endraw %}: + - CF_QUALITY: false +{% endhighlight %} + +Image quality has 3 indicators: +* True - this image is considered a quality image (ex. passed tests), +* False - this image is not considered a quality image (ex. when tests failed but the image was already built). +* No value (nobody set the annotation) - this image has no quality indicator. 
+ +{% include image.html lightbox="true" file="/images/pipeline/docker-image/quality-image-annotation.png" url="/images/pipeline/docker-image/quality-image-annotation.png" caption="Quality image annotation" max-width="40%" %} + + +## Viewing Image Metadata Annotations +You can view an image's metadata annotation by: +1. Navigating to the `Images` view +2. Selecting the target image +3. Selecting the `Annotations` tab + +{% + include image.html + lightbox="true" + file="/images/codefresh-yaml/docker-image-metadata/annotations.png" + url="/images/codefresh-yaml/docker-image-metadata/annotations.png" + alt="Image annotations" + max-width="65%" +%} + +In addition, you can add selected annotations to the images table on images page. To display an annotation in the image table, click on the gear icon at the top right corner of image page and then select all annotations you want to display. + +{% include image.html lightbox="true" file="/images/aec92e8-Screen_Shot_2017-10-17_at_3.01.26_PM.png" url="/images/aec92e8-Screen_Shot_2017-10-17_at_3.01.26_PM.png" alt="Screen Shot 2017-10-08 at 8.28.35 AM.png" max-width="40%" %} + + +## Annotating images programmatically + +It is also possible to annotate images with the [Codefresh CLI](https://codefresh-io.github.io/cli/). + +First find the id of an image that you wish to annotate with the command + +``` +codefresh get images +``` + +You can also search for a specific image by name: + +``` +$ codefresh get images --image-name custom +ID NAME TAG CREATED SIZE PULL +b5f103a87856 my-custom-docker-image bla Fri Feb 01 2019 91.01 MB r.cfcr.io/kostis-codefresh/my-custom-docker-image:bla +``` +Then once you have the ID of the image you can use the [annotate command](https://codefresh-io.github.io/cli/images/annotate-image/) to add extra metadata: + +``` +codefresh annotate image b5f103a87856 -l coverage=75 +``` + +## Using custom metadata in Codefresh pipelines + +You can also use the Codefresh CLI to fetch existing metadata from images. It is then very easy to extract and process specific fields with [yq](https://github.com/kislyuk/yq) + +Here is an example +``` +$ codefresh get image b5f103a87856 --output=yaml | yq -r .annotations.coverage +75 +``` + +You can then easily process the metadata (e.g. with scripts) and take decisions according to them. Here is an example +step that will fail the build if test coverage on an image is less than 80% + + `YAML` +{% highlight yaml %} +version: '1.0' +steps: + findLabel: + title: Get image label for coverage + image: codefresh/cli + commands: + - export MY_COVERAGE=$(codefresh get image b5f103a87856 --output=yaml | yq -r .annotations.coverage) + - echo "Coverage is $MY_COVERAGE" + - if [[ $MY_COVERAGE -lt "80" ]]; then exit 1 ; fi + +{% endhighlight %} + +The possibilities are endless as you can take any combination of image metadata and use any complex conditional +in order to process them in a Codefresh pipeline. 
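+
+You can also combine the two approaches and annotate an image from inside the very pipeline that built it. The sketch below is illustrative: it assumes that the {% raw %}`${{build_step.imageId}}`{% endraw %} variable resolves to an image ID that the `annotate` command accepts, and that your own test tooling produces the coverage value:
+
+ `YAML`
+{% highlight yaml %}
+version: '1.0'
+steps:
+  build_step:
+    type: build
+    image_name: myrepo/imagename
+    working_directory: ./
+    dockerfile: Dockerfile
+
+  annotate_image:
+    title: Record test coverage on the built image
+    image: codefresh/cli
+    commands:
+      # Replace the hardcoded value with the output of your test tooling
+      - export MY_COVERAGE=75
+      - codefresh annotate image {% raw %}${{build_step.imageId}}{% endraw %} -l coverage=$MY_COVERAGE
+{% endhighlight %}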
+ + +## Related articles +[External Docker Registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +[Accessing a Docker registry from your Kubernetes cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/access-docker-registry-from-kubernetes/) diff --git a/_docs/pipelines/docker-operations.md b/_docs/pipelines/docker-operations.md deleted file mode 100644 index 4678a46d..00000000 --- a/_docs/pipelines/docker-operations.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Using Docker" -description: "" -group: pipelines -toc: true ---- - -Coming soon diff --git a/_docs/pipelines/hooks.md b/_docs/pipelines/hooks.md new file mode 100644 index 00000000..68a75d5c --- /dev/null +++ b/_docs/pipelines/hooks.md @@ -0,0 +1,634 @@ +--- +title: "Hooks in CI pipelines" +description: "Execute commands before/after each pipeline or step" +group: pipelines +toc: true +--- + +Hooks in pipelines allow you to run specific actions at the end and the beginning of the pipeline, as well as before/after a step. + +Hooks can be a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/), as you need to define: + +1. A Docker image that will be used to run specific commands. +1. One or more commands to run within the context of that Docker image. + +For simple commands we suggest you use a small image such as `alpine`, but any Docker image can be used in hooks. + +## Pipeline hooks + +Codefresh allows you to run a specific step before each pipeline as well as after it has finished. + +### Running a step at the end of the pipeline + +You can easily run a step at the end of pipeline, that will execute even if one of the steps have failed (and thus the pipeline is stopped in middle): + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "cleanup after end of pipeline" + +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Hello world" + step2: + title: "Step 2" + type: "freestyle" + image: node:10-buster + commands: + - echo "There was an error" + - exit 1 +{% endraw %} +{% endhighlight %} + +In the example above we define a hook for the whole pipeline that will run a step (the `exec` keyword) inside `alpine:3.9` and will simply execute an `echo` command. Because we have used the `on_finish` keyword, this step will execute even if the whole pipeline fails. + +This scenario is very common if you have a cleanup step or a notification step that you always want to run at the end of the pipeline. You will see the cleanup logs in the top pipeline step. + + {% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/hooks/cleanup-step.png" +url="/images/pipeline/codefresh-yaml/hooks/cleanup-step.png" +alt="Running a cleanup step" +caption="Running a cleanup step" +max-width="80%" +%} + +Apart from the `on_finish` keyword you can also use `on_success` and `on_fail` if you want the step to only execute according to a specific result of the pipeline. 
It is also possible to use multiple hooks at the same time: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "cleanup after end of pipeline" + on_success: + exec: + image: alpine:3.9 + commands: + - echo "Send a notification only if pipeline was successful" + on_fail: + exec: + image: alpine:3.9 + commands: + - echo "Send a notification only if pipeline has failed" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Hello world" + step2: + title: "Step 2" + type: "freestyle" + image: node:10-buster + commands: + - echo "There was an error" + - exit 1 #Comment this line out to see how hooks change + +{% endraw %} +{% endhighlight %} + +Note that if you have multiple hooks like the example above, the `on_finish` segment will always execute after any `on_success`/`on_fail` segments (if they are applicable). + + +### Running a step at the start of the pipeline + +Similar to the end of the pipeline, you can also execute a step at the beginning of the pipeline with the `on_elected` keyword: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "Creating an adhoc test environment" + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "Destroying test environment" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" + step2: + title: "Step 2" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running acceptance tests on test environment" + +{% endraw %} +{% endhighlight %} + +All pipeline hooks will be shown in the "initializing process" logs: + + {% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/hooks/before-pipeline.png" +url="/images/pipeline/codefresh-yaml/hooks/before-pipeline.png" +alt="Hooks before a pipeline" +caption="Hooks before a pipeline" +max-width="80%" +%} + +It is possible to define all possible hooks (`on_elected`, `on_finish`, `on_success`, `on_fail`) in a single pipeline, if this is required by your development process. + +## Step hooks + +Hooks can also be defined for individual steps inside a pipeline. This capability allows for more granular control on defining prepare/cleanup phases for specific steps. + +The syntax for step hooks is the same as pipeline hooks (`on_elected`, `on_finish`, `on_success`, `on_fail`), you just need to put the respective segment under a step instead of the root of the pipeline. + +For example, this pipeline will always run a cleanup step after integration tests (even if the tests themselves fail). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Compile application" + type: "freestyle" + image: node:10-buster + commands: + - echo "Building application" + step2: + title: "Unit testing" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running unit tests" + hooks: + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "Create test report" + step3: + title: "Uploading artifact" + type: "freestyle" + image: node:10-buster + commands: + - echo "Upload to artifactory" +{% endraw %} +{% endhighlight %} + + +Logs for steps hooks are shown in the log window of the step itself. 
+ + {% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/hooks/step-after.png" +url="/images/pipeline/codefresh-yaml/hooks/step-after.png" +alt="Hooks before a pipeline" +caption="Hooks before a pipeline" +max-width="80%" +%} + +As with pipeline hooks, it is possible to define multiple hook conditions for each step. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Compile application" + type: "freestyle" + image: node:10-buster + commands: + - echo "Building application" + step2: + title: "Security scanning" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Security scan" + hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "Authenticating to security scanning service" + on_finish: + exec: + image: alpine:3.9 + commands: + - echo "Uploading security scan report" + on_fail: + exec: + image: alpine:3.9 + commands: + - echo "Sending slack notification" + +{% endraw %} +{% endhighlight %} + +The order of events in the example above is the following: + +1. The `on_elected` segment executes first (authentication) +1. The step itself executes (the security scan) +1. The `on_fail` segment executes (only if the step throws an error code) +1. The `on_finish` segment always executes at the end + + +## Running steps/plugins in hooks + +Hooks can use [steps/plugins](https://steps.codefresh.io). With plugins you have to specify: + +- The type field for the step/plugin. +- The arguments needed for the step/plugin. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" + +hooks: #run slack-notifier hook on build completion + on_finish: + steps: + exec: + type: slack-notifier + arguments: + SLACK_HOOK_URL: '${{SLACK_WEBHOOK_URL}}' + SLACK_TEXT: '${{SLACK_TEXT}}' + +steps: + step1: + title: "Freestyle step" + type: "freestyle" + image: alpine + commands: + - echo "Codefresh" + hooks: #run slack-notifier hook on step completion + on_finish: + steps: + exec: + type: slack-notifier + arguments: + SLACK_HOOK_URL: '${{SLACK_WEBHOOK_URL}}' + SLACK_TEXT: '${{SLACK_TEXT}}' +{% endraw %} +{% endhighlight %} + +## Controlling errors inside pipeline/step hooks + +By default if a step fails within a pipeline, the whole pipeline will stop and be marked as failed. +This is also true for `on_elected` segments as well. If they fail, then the whole pipeline will fail (regardless of the position of the segment in a pipeline or step). However, this only applies to `on_elected` segments. +`on_success`, `on_fail` and `on_finish` segments do not affect the pipeline outcome at all, and a pipeline will continue even if one of these segments fails. + +For example the following pipeline will fail right away, because the pipeline hook fails at the beginning. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "failing on purpose" + - exit 1 +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" +{% endraw %} +{% endhighlight %} + +You can change this behavior by using the existing [fail_fast property]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#execution-flow) inside an `on_elected` hook. 
+ +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + fail_fast: false + commands: + - echo "failing on purpose" + - exit 1 +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" +{% endraw %} +{% endhighlight %} + +This pipeline will now execute successfully and `step1` will still run as normal, because we have used the `fail_fast` property. You can also use the `fail_fast` property on step hooks as well: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" + hooks: + on_elected: + exec: + image: alpine:3.9 + fail_fast: false + commands: + - echo "failing on purpose" + - exit 1 +{% endraw %} +{% endhighlight %} + + +>Notice that the `fail_fast` property is only available for `on_elected` hooks. The other types of hooks (`on_finish`, `on_success`, `on_fail`) do not affect the outcome of the pipeline in any way. Even if they fail, the pipeline will continue running to completion. This behavior is not configurable. + + +## Using multiple steps for hooks + +In all the previous examples, each hook was a single step running on a single Docker image. You can also define multiple steps for each hook. This is possible by inserting an extra `steps` keyword inside the hook and listing multiple Docker images under it: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: + steps: + mycleanup: + image: alpine:3.9 + commands: + - echo "echo cleanup step" + mynotification: + image: cloudposse/slack-notifier + commands: + - echo "Notify slack" +steps: + step1: + title: "Step 1" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests on test environment" +{% endraw %} +{% endhighlight %} + +By default all steps in a single hook segment are executed one after the other. But you can also run them in [parallel]({{site.baseurl}}/docs/pipelines/advanced-workflows/#inserting-parallel-steps-in-a-sequential-pipeline): + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + step1: + title: "Compile application" + type: "freestyle" + image: node:10-buster + commands: + - echo "Building application" + step2: + title: "Unit testing" + type: "freestyle" + image: node:10-buster + commands: + - echo "Running Integration tests" + - exit 1 + hooks: + on_fail: + mode: parallel + steps: + upload-my-artifact: + image: maven:3.5.2-jdk-8-alpine + commands: + - echo "uploading artifact" + my-report: + image: alpine:3.9 + commands: + - echo "creating test report" +{% endraw %} +{% endhighlight %} + +You can use multiple steps in a hook in both the pipeline and the step level. + + +## Using annotations and labels in hooks + +The hook syntax can also be used as a unified interface for encompassing the existing syntax of [build annotations]({{site.baseurl}}/docs/pipelines/annotations/) and [metadata]({{site.baseurl}}/docs/pipelines/docker-image-metadata/). 
+ +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: + image: alpine:3.9 + commands: + - echo "Creating an adhoc test environment" + annotations: + set: + - entity_type: build + annotations: + - my_annotation_example1: 10.45 + - my_string_annotation: Hello World +steps: + clone: + title: Cloning source code + type: git-clone + arguments: + repo: 'codefresh-contrib/golang-sample-app' + revision: master + build-image: + type: build + image_name: my-golang-image + working_directory: '${{clone}}' + tag: master + hooks: + on_success: + exec: + image: alpine:3.9 + commands: + - echo "Scanning docker image" + metadata: # setting metadata + set: + - '${{build-image.imageId}}': + - status: 'Success' +{% endraw %} +{% endhighlight %} + +Note however, that if you decide to use annotations and metadata inside hooks, you cannot mix and max the old syntax with the new syntax. + +The following pipeline is **NOT** valid: + +`invalid-codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + test: + image: alpine + on_success: # you cannot use old style together with hooks + annotations: + set: + - entity_type: build + annotations: + - status: 'success' + commands: + - echo block + hooks: + on_success: + annotations: + set: + - entity_type: build + annotations: + - status: 'success' +{% endraw %} +{% endhighlight %} + +The pipeline is not correct, because the first segment of annotations is directly under `on_success` (the old syntax), while the second segment is under `hooks/on_success` (the new syntax). + + +## Syntactic sugar syntax + +To simplify the syntax for hooks, the following simplifications are also offered: + +If you do not want to use metadata or annotations in your hook the keyword `exec` can be omitted: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_finish: # no exec keyword + image: notifications:master + commands: + - ./send_workflow_finished.js +steps: + build: + type: build + image_name: my_image + tag: master + hooks: + on_fail: # no exec keyword + image: notifications:master + commands: + - ./send_build_failed.js +{% endraw %} +{% endhighlight %} + + +If you do not want to specify the Docker image you can simply omit it. Codefresh will use the `alpine` image in that case to run the hook: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: + exec: # no image keyword - alpine image will be used + - echo "Pipeline starting" +steps: + build: + type: build + image_name: my_image + tag: master + hooks: + on_success: # no image keyword - alpine image will be used + exec: + - echo "Docker image was built successfully" + annotations: + set: + - entity_type: build + annotations: + - status: 'Success' +{% endraw %} +{% endhighlight %} + + + If you don't use metadata or annotations, you can also completely remove the `exec` keyword and just mention the commands you want to run (`alpine` image will be used by default): + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +hooks: + on_elected: # no exec/image keyword - alpine image will be used + - echo "Pipeline starting" +steps: + build: + type: build + image_name: my_image + tag: master + hooks: + on_success: # no exec/image keyword - alpine image will be used + - echo "Docker image was built successfully" +{% endraw %} +{% endhighlight %} + +## Using Type Steps / Plugins in hooks + +You can use a type step / plugins in hooks. 
With this you will need to change `exec` into `steps` with the information needed for the step. + +Below is an example pipeline hook using the `slack-notifier` step/plugin for when the pipeline starts. + +```yaml +hooks: + on_elected: + steps: + exec: + slack_pending: + type: slack-notifier + arguments: + SLACK_HOOK_URL: {% raw %}'${{SLACK_WEBHOOK_URL}}'{% endraw %} + SLACK_TEXT: '*Build Started* :crossed_fingers:' +``` + +## Limitations of pipeline/step hooks + +With the current implementation of hooks, the following limitations are present: + +* The [debugger]({{site.baseurl}}/docs/pipelines/debugging-pipelines/) cannot inspect commands inside hook segments +* Hooks are not supported for [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +* Storage integrations don't resolve in hooks (for example, [test reports]({{site.baseurl}}/docs/testing/test-reports/#producing-allure-test-reports-from-codefresh-pipelines)) +* Step hook does not support the working_directory field aka `working_directory: ${{clone}}` + +## Related articles +[Conditional execution of steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) +[Working Directories]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Annotations in CI pipelines]({{site.baseurl}}/docs/pipelines/annotations/) + + diff --git a/_docs/pipelines/introduction-to-codefresh-pipelines.md b/_docs/pipelines/introduction-to-codefresh-pipelines.md new file mode 100644 index 00000000..1a16c867 --- /dev/null +++ b/_docs/pipelines/introduction-to-codefresh-pipelines.md @@ -0,0 +1,336 @@ +--- +title: "Introduction to Codefresh pipelines" +description: "Understand how Codefresh pipelines work" +group: pipelines +redirect_from: + - /docs/introduction-to-codefresh-pipelines/ + - /docs/configure-ci-cd-pipeline/ +toc: true +--- + + +The central component of the Codefresh platform for continuous integration (CI) are pipelines. Pipelines are workflows that contain individual steps, with each step responsible for a specific action in the CI process. + +Use CI pipelines to: + +* Compile and package code +* Build Docker images +* Push Docker images to any [Docker Registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +* Deploy applications/artifacts to VMs, Kubernetes clusters, FTP sites, S3 buckets etc. +* Run [unit tests]({{site.baseurl}}/docs/testing/unit-tests/), [integration tests]({{site.baseurl}}/docs/testing/integration-tests/), acceptance tests etc. +* Any custom action that you define + +{% include +image.html +lightbox="true" +file="/images/pipelines/codefresh-yaml/stages/complex-pipeline.png" +url="/images/pipelines/codefresh-yaml/stages/complex-pipeline.png" +alt="Codefresh CI pipelines" +caption="Codefresh CI pipelines" +max-width="90%" +%} + +## Why are Codefresh CI pipelines different? + +Codefresh offers unique characteristics in CI pipelines that serve as the cornerstone of the build/deploy process: + +1. All [steps]({{site.baseurl}}/docs/pipelines/steps/) in Codefresh pipelines are executed inside a Docker container of your choosing. +1. All steps in Codefresh share the same "workspace" in the form of a shared Docker volume. +1. The shared Docker volume is automatically cached between pipeline executions. +1. Every successful pipeline automatically pushes its Docker image to the default Docker registry defined in your account. +1. Codefresh has a distributed Docker cache for all build nodes and caches layers similar to the docker daemon on your workstation. 
This is fully automated, and does not need to be configured to activate it. + +### Using Docker containers as build tooling + +Unlike traditional solutions, Codefresh was built from the ground up to have full Docker support. All Codefresh pipelines +deal with Docker images, either using them as runtime tools or creating them as deployment artifacts. +Everything that happens in Codefresh uses a Docker image behind the scenes. + +It is important that you understand how to take advantage of Docker-based pipelines as they are much more powerful than +traditional VM solutions. The capability to define your own tooling cannot be understated. It is the fastest way to take +full control of your build tools and to upgrade them easily. + +With traditional VM-based build solutions, you are constrained on the build and deployment tools provided by the vendor. +If for example you need a new version of Node/Java/Python other than the one that is provided on the build slave, you have to wait for your vendor to add it. If you need to use a special tool (e.g terraform, gcloud) and the vendor does +not support it you are out of luck. + +With Codefresh you don't have to care about what is installed in the Runners that execute your builds. They can run *any* Docker image of your choosing. You are free to update the version of the image used at any given time. + +Here is an example: + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/steps-example1.png" +url="/images/pipeline/introduction/steps-example1.png" +alt="Pipeline with three steps" +caption="Pipeline with three steps" +max-width="70%" +%} + + +1. The first step runs under the context of a Node image that prepares the application and runs [unit tests]({{site.baseurl}}/docs/testing/unit-tests/). +1. The second step uses an image with s3 command line tools and uploads the test results to a bucket that holds test reports. +1. The helm step creates a Helm chart and pushes it to a Helm repository. + +You don't need to contact Codefresh and ask them to add the S3 executable on the build runners. You just use a prebuilt Docker image that contains it. The version used for Node is defined by you and if you wish to upgrade to another version +you simply change the definition of the pipeline. + + +Here is another example: + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/steps-example2.png" +url="/images/pipeline/introduction/steps-example2.png" +alt="Codefresh steps example 2" +caption="Pipeline with 4 steps" +max-width="70%" +%} + +1. The first step runs under the context of a Maven image that compiles the code and creates an executable. +1. The second step uses a Docker image that contains terraform and creates a single ECS instance in AWS. +1. The third step uses a custom Docker image that deploys to the ECS container that was just created. +1. The last step uploads the Maven reports that were created in step 1 to an FTP site. + +You should now start seeing the endless possibilities. You can mix and match any Docker image (either a public one +or your own) to use a build context in your step. This makes Codefresh a future-proof solution for all build tools +that exist now and all of them that will appear in the future. As long as there is a Docker image for a tool, Codefresh +can use it in a pipeline without any extra configuration. + +Codefresh also offers a [marketplace](https://codefresh.io/steps/){:target="\_blank"} with several existing plugins. 
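+
+For instance, if a single step needs a different Node version, you simply point that step to the image tag you want. A quick sketch (the tag and commands are illustrative):
+
+```
+run_unit_tests:
+  title: Run tests with the Node version of your choice
+  image: node:11.1           # change only this tag to upgrade your build tooling
+  commands:
+    - npm install
+    - npm test
+```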
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/plugin-directory.png" +url="/images/pipeline/plugin-directory.png" +alt="Codefresh steps directory" +caption="Codefresh steps directory" +max-width="80%" +%} + + +All plugins in the marketplace are open-source, and we accept external contributions so you can easily add your own. + + +### Sharing the workspace between build steps + +We have seen in the previous section that Codefresh can use Docker images as the context of a build step. The second +important point to understand regarding Codefresh CI pipelines is that the default workspace of each step is shared between all steps in a pipeline. + +This happens via a Docker volume which is attached to all Docker containers that represent each step. This volume is +always available at `/codefresh/volume`, and is used as the parent folder where the project is cloned. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume.png" +url="/images/pipeline/introduction/codefresh-volume.png" +alt="Codefresh volume" +caption="All steps share the same volume" +max-width="90%" +%} + +Anything in this volume is available to all steps of the pipeline (as well as to subsequent executions of the same pipeline as we will see later). + +Again, this places Codefresh ahead of traditional solutions that execute build steps in a completely isolated manner. +In traditional VM-based builds, using artifacts produced from one step in another step, is a complicated process as one +must declare which artifact folders should be re-used. Artifact re-use sometimes happens with compression/decompression +of the respective folder resulting in really slow builds if a project is very big. + +Codefresh does not need to bother the user with artifact reuse across steps. *Anything* that is placed in the shared Codefresh volume will automatically be available to the next steps in the pipeline without any extra configuration. + +Example 1 + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume-example1.png" +url="/images/pipeline/introduction/codefresh-volume-example1.png" +alt="Codefresh volume example 1" +caption="Re-using Node Modules" +max-width="90%" +%} + +1. The first step runs `npm install` and downloads all libraries in `node_modules` into the shared Codefresh volume. +1. The second step runs `npm test`. The folder `node_modules` is still present from the previous step. + +Example 2 + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/codefresh-volume-example2.png" +url="/images/pipeline/introduction/codefresh-volume-example2.png" +alt="Codefresh volume example 2" +caption="Re-using Test reports" +max-width="90%" +%} + +1. The first step runs `mvn test` and produces some test reports in `target/surefire-reports` into the shared Codefresh volume. +1. The next step uploads these reports using FTP to an external site. + + +The common volume shared among build steps makes it very easy to create pipelines that work in a gradual manner +where any step in the pipeline is using artifacts produced by a previous one. + +>The shared volume is **NOT available** in [build steps]({{site.baseurl}}/docs/pipelines/steps/build/). This is not a Codefresh limitation. Docker itself [does not allow volumes during builds](https://github.com/moby/moby/issues/14080){:target="\_blank"}. There is no folder `/codefresh/volume` inside a Dockerfile for you to access. 
+ +You can also use [environment variables]({{site.baseurl}}/docs/pipelines/variables/) to share information between steps. All predefined environment variables are available to all steps, and each individual step can use `cf_export` to dynamically inject extra environment variables during the build process. + + +## Working with Codefresh pipelines + +Now that we know the basics, we can see how you can take advantage of Docker-based pipelines in order to build and deploy your projects. + + +### Cloning the source code + +You can clone source code using the built-in [Git-clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) as the first step in a CI pipeline, or manually run your own Git clone commands in a freestyle step. Codefresh has built-in [Git integration]({{site.baseurl}}/docs/integrations/git-providers/) with all popular git providers (both cloud and on-premises installations). + +Codefresh uses the shared volume as the parent folder of the project. So if your pipeline is connected to a Git repo that contains `my-project` the following will happen: + +* `/codefresh/volume` is the shared directory for all steps +* `/codefresh/volume/my-project` is where the source code exists. This is also the current working directory +* Any other directory (e.g. `/bin`, `/var`, `/opt`) depends on the current container image that is used as build context + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/checkout.png" +url="/images/pipeline/introduction/checkout.png" +alt="Codefresh checkout folder" +caption="Codefresh checkout folder" +max-width="80%" +%} + +There are three important points to consider regarding these folders: + +1. The [working directory]({{ site.baseurl }}/docs/pipelines/what-is-the-codefresh-yaml/#working-directories) of each step is by default the project folder (e.g. `/codefresh/volume/my-project`). Therefore +your build step can run commands exactly as you would run them locally (e.g. `npm install, pip install, mvn package, bundle install`). + +1. Notice that the project folder is placed on the Codefresh volume, so by default it is also available to all other steps. The code that you check out in the beginning, as well as all other files that are created on it, are available to all steps. Once you create `node_modules`, or any other folder that exists inside the project folder, it will automatically persist for all other steps. + +1. Finally, `/codefresh/volume` is an internal folder name, and you should use `{% raw %}${{CF_VOLUME_PATH}}{% endraw %}` in your codefresh.yml file +if you really want to reference this folder. You can also reference your project folder as `{% raw %}${{CF_VOLUME_PATH}}/${{CF_REPO_NAME}}{% endraw %}` if you need it. + +See the [System Provided Variables]({{site.baseurl}}/docs/pipelines/variables/#system-provided-variables) section for more information. + +### Working with Docker inside a Codefresh pipeline + +We have already seen that Codefresh pipelines are based on Docker images and that each step runs inside the context of a Docker container. You might be wondering how you can run Docker commands directly inside a Codefresh pipeline. + +The answer is that you don't. Even though in the future Codefresh might allow for Docker-in-Docker capabilities, at the moment this is not supported for security reasons (only enterprise customers have access to the underlying Docker daemon). Any scripts that you already have that run Docker commands on their own will need to be adapted to Codefresh pipelines. 
+ +Usually you want to run a docker command for four reasons: + +1. To build a Docker image +1. To push a Docker image +1. To run a docker-compose setup +1. To run a Docker container + +For all these situations Codefresh gives you special pipeline steps that perform the respective action. These are: + +1. The [build step]({{site.baseurl}}/docs/pipelines/steps/build/) +1. The [push step]({{site.baseurl}}/docs/pipelines/steps/push/) +1. The [compositions step]({{site.baseurl}}/docs/pipelines/steps/composition/) +1. The [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) + +The commands you define in a freestyle step run automatically in a Docker container that is attached to that step once the pipeline executes. + +Therefore, this command on your local workstation: + +``` +docker run python:3.6.4-alpine3.6 pip install . +``` + +will become in Codefresh + +``` +CollectAllMyDeps: + title: Install dependencies + image: python:3.6.4-alpine3.6 + commands: + - pip install . +``` +For the plugins in the [Step Marketplace](https://codefresh.io/steps/) we already give an example of the YAML part that must be included in your pipeline: + +{% include +image.html +lightbox="true" +file="/images/pipeline/plugin-example.png" +url="/images/pipeline/plugin-example.png" +alt="Codefresh steps directory" +caption="Codefresh steps directory" +max-width="50%" +%} + +Each plugin also defines its input/output in the form of environment variables and files. + +### Creating Docker images dynamically as build tools + + +Now we reach one of the most powerful features of Codefresh pipelines. We have already seen that [freestyle pipeline steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) are just a series of commands that run inside the context of a Docker container. In most cases the images used +for the freestyle steps are known in advance and come from public (e.g. Dockerhub) or [private Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). + +Codefresh is one the few CI/CD solutions that not only offers easy Docker registry integration + accessible to all pipelines +but also allows you to **build docker images on demand in the same pipeline where they are required**. + +This means that you can create a special Docker image in an early step inside a Codefresh pipeline and then reference it in a later step in the same pipeline. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/dynamic-docker-builds.png" +url="/images/pipeline/introduction/dynamic-docker-builds.png" +alt="Codefresh dynamic docker builds" +caption="Creating dynamically Docker images as build steps" +max-width="90%" +%} + +Let's say for example that you are moving a legacy application to Codefresh which is deployed using a special Python script. Your main application is a Ruby-On-Rails app. Both applications exist in the same git repository (we mention this for simplicity reasons, Codefresh also supports checking out code from multiple repositories). + +You can create a single pipeline with Codefresh that does the following: + +1. Checks out the code +1. Creates a Docker image based on Python for the deployment tool +1. Uploads the Python tool Docker image to the internal registry +1. Builds the Ruby application using a freestyle step with the R-O-R image from Dockerhub +1. 
Deploys the Ruby application by running the Python based deployment tool image (after pulling it first) + +This concept is ground-breaking as it allows you to automatically update your build tools that are used in any pipeline. +Even though you could manually create the Docker images yourself before-hand, it is better to completely automate them +inside the pipeline they are actually needed. This ensures that both the application and its tooling are always at the latest version. + +### How caching works in Codefresh + +Codefresh employs several caching mechanisms for both Dockerized and non-dockerized applications. The shared volume is also cached behind the scenes automatically. See our [caching guide]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipeline-caching/) for more details. + +### Calling other pipelines + +It is also possible to chain multiple pipelines together in Codefresh. To accomplish this, Codefresh offers +a special Docker image that contains the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"} and allows you to trigger another pipeline using its name. + +{% include +image.html +lightbox="true" +file="/images/pipeline/introduction/call-pipeline.png" +url="/images/pipeline/introduction/call-pipeline.png" +alt="Codefresh call pipeline" +caption="Calling another pipeline" +max-width="80%" +%} + +Notice that each pipeline in Codefresh is completely isolated from the other. They use a different Docker volume so the build context of each one cannot access files from the other. This may change in the future, but for the time being +you should know that only steps within the same pipeline can share artifacts. + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Build and Docker caching]({{site.baseurl}}/docs/pipelines/pipeline-caching/) + + + diff --git a/_docs/pipelines/marketplace.md b/_docs/pipelines/marketplace.md deleted file mode 100644 index 4295f74e..00000000 --- a/_docs/pipelines/marketplace.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Codefresh marketplace" -description: "" -group: pipelines -toc: true ---- - -The Codefresh Hub for Argo documentation can be found in its [official repository](https://github.com/codefresh-io/argo-hub). - -Codefresh is fully backing this project and will help all developers that want to contribute to succeed. - -You can find documentation about how to contribute to the argo hub in the [official repository contribute section](https://github.com/codefresh-io/argo-hub#How-to-Contribute) - diff --git a/_docs/pipelines/monitoring-pipelines.md b/_docs/pipelines/monitoring-pipelines.md new file mode 100644 index 00000000..b75b8440 --- /dev/null +++ b/_docs/pipelines/monitoring-pipelines.md @@ -0,0 +1,463 @@ +--- +title: "Monitoring CI pipelines" +description: "Viewing your builds and logs" +group: pipelines +toc: true +--- + + +All pipeline activity in Codefresh can be viewed in the *Builds* tab. +* The global build view shows builds for all projects across your organization +* The project-based view from the settings inside an individual project shows the builds for the selected project + +Both views have the same controls and filters. + +## Viewing pipeline status + +Each screen contains all builds sorted from the most recent to the oldest. The first time you visit +the screen there are no filters defined. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/builds-dashboard.png" +url="/images/pipeline/monitoring/builds-dashboard.png" +alt="Pipeline Activity in Codefresh" +caption="Pipeline activity" +max-width="80%" +%} + +By default, it shows all builds that is happening in Codefresh. To narrow the list you can use the filters on the top +of the screen. + +### Applying filters on the build view + +Directly above the list you can find several filters. + +At the most basic level you can choose between + + * *Running* builds that are currently executing + * *Pending* builds which are queued and waiting to start + * *Delayed* builds which cannot run yet, because there are no free pipeline builders. + A build can be delayed for a maximum of seven days, and each account can have up to 1000 delayed builds at any time. + * Builds that are delayed for more than seven days are terminated with a _Delay time limit exceeded_ reason. + * If the total number of delayed builds exceed 1000, older builds are terminated with a _Maximum delayed workflows exceeded_ reason. + + * *All* builds regardless of running stage (this is the default) + +You can further filter the builds by choosing the various filter types that specify the build job. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-filtering.png" +url="/images/pipeline/monitoring/build-filtering.png" +alt="Pipeline filters in Codefresh" +caption="Available filters" +max-width="50%" +%} + +The available filters are: + +* *Pipeline* - any of the pipelines available. +* *Provider* - type of [Git provider]({{site.baseurl}}/docs/integrations/git-providers/). +* *Repository* - Git repository from the attached [trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/). +* *Type* - build, [launch a test environment]({{site.baseurl}}/docs/getting-started/on-demand-environments/#launching-a-docker-image-using-codefresh). +* *Branch* - any of the available branches from the attached Git trigger. +* *Committer* - person that made the commit that triggered the build. +* *Environment* - which [environment]({{site.baseurl}}/docs/deploy-to-kubernetes/environment-dashboard/) was affected. +* *Status* - success, error, in-progress, pending, terminated etc. A Pending status can also indicate that [pipeline build execution has been paused]({{site.baseurl}}/docs/administration/pipeline-settings/#pause-pipeline-executions) for the account. +* *Trigger type* - what type of trigger was responsible for this build +* *Git event* - in the case of [git triggers]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) the exact event + +Notice that all filters are multiple-choice so you can select multiple values for each filter category. +At any given point you can see all the active filters on top of the screen. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/possible-filters.png" +url="/images/pipeline/monitoring/possible-filters.png" +alt="Pipeline filters in Codefresh" +caption="Active filters" +max-width="50%" +%} + +You can easily remove active filters, by clicking on them and adding/removing values. 
+ +On the right hand side you can also find a filtering toolbar with time options: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-filter-date.png" +url="/images/pipeline/monitoring/build-filter-date.png" +alt="Filtering options for time" +caption="Filtering options for time" +max-width="60%" +%} + +You can combine all previously mentioned filters with the time based filters. + +### Creating build views + +Once you have a set of filters that you use regularly, you can save them as a custom *Build View* by clicking the *Save as View* button +and providing a name. + +Now you can select at the top of the window any of the available build views to automatically filter results according to the respective sets of filters. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-view-selection.png" +url="/images/pipeline/monitoring/build-view-selection.png" +alt="Build View selection" +caption="Build View selection (click to enlarge)" +max-width="50%" +%} + +You can delete existing build-views by clicking on the *manage views* button. +You can change the filters of an existing build view by making a new filter selection and then saving the view with an existing name (effectively overwriting it). + + +### Build details + + +For each individual build you can see several details such as the git hash, the person who made the commit, the pipeline that was triggered as well as how much time it took. For each event type you will also see additional context related information. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-details-entry.png" +url="/images/pipeline/monitoring/build-details-entry.png" +alt="build details in Codefresh" +caption="Build details" +max-width="100%" +%} + +Child builds triggered by other builds are identified in the Event column by the icon {::nomarkdown} {:/}. +The Parent Build column shows the link to the parent build. Mouse over to see the tooltip with information on the parent build. The tooltip includes links to the parent build, repo, branch, commit message, and the ability to filter by repo and branch. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/child-parent-build-info.png" +url="/images/pipeline/monitoring/child-parent-build-info.png" +alt="Child build in Builds list" +caption="Child build in Builds list" +max-width="70%" +%} + +There are also extra options if you click the small "3-dot" menu button on the right. For a particular build, you can: + +- View the logs +- View the YAML +- View or add [annotations]({{site.baseurl}}/docs/pipelines/annotations/) +- View the images produced (and consequently launch an on-demand [test environment]({{site.baseurl}}/docs/getting-started/on-demand-environments/#launching-a-docker-image-using-codefresh)) + +Notice that if you restart a pipeline it will trigger with the exact settings it *originally* had. So +if this was a manual trigger where you [disabled caching]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) or changed the [notification options](#monitoring-pipelines-that-check-pull-requests), the new +execution will still honor those settings (even if you have changed them for later builds). + +An extra button for test reports will be visible if you are using the [test report feature]({{site.baseurl}}/docs/testing/test-reports/) of Codefresh. 
+ + +## Viewing details for an individual pipeline build + +If you click on any individual pipeline build, you will enter the pipeline build information screen. +From here you can see more details for a build such as the logs, running time and resource metrics. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/pipeline-view.png" +url="/images/pipeline/monitoring/pipeline-view.png" +alt="Pipeline view" +caption="Pipeline view" +max-width="80%" +%} + +Each section in this screen corresponds to each pipeline step. There are two special steps: + +* *Initializing Process* +* *Cloning Main Repository* + +These are Codefresh built-in steps and will appear for most builds (you can also create a pipeline that doesn't clone a git repository by default). The rest of the step names depend on your `codefresh.yml` (or the default step names provided by Codefresh). The different columns take the names from the defined [pipeline stages]({{site.baseurl}}/docs/pipelines/stages/). + +### Viewing status for pipeline steps + +Monitor the status of the steps in the pipeline as they are executed. + +{: .table .table-bordered .table-hover} +| Step Status Icon | Description | +| ------------------------| ---------------- | +|{::nomarkdown} {:/}| Pipeline step completed successfully. | +|{::nomarkdown} {:/}| Pipeline step pending approval has been approved, either manually or automatically. | +|{::nomarkdown} {:/}| Pipeline step pending approval has been denied approval. | +|{::nomarkdown} {:/}| Pipeline step currently running. | +|{::nomarkdown} {:/}| Pipeline step running in debug mode. See [Debugging pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/debugging-pipelines/) for more information. | +|{::nomarkdown} {:/}| Pipeline step gracefully terminating execution. | +|{::nomarkdown} {:/}| Pipeline step execution has been manually or automatically terminated. | +|{::nomarkdown} {:/}| Pipeline step execution has been terminated because of error. | + + + +### Viewing/downloading logs for builds and build steps + +View logs for running and completed builds and download them in HTML or text formats. +You can view logs online, for the entire build or for single or specific steps in the build. Similarly, you can download the logs for the entire build, or for single or specific steps. +The Filter Logs option is useful to view and manage logs, especially for large builds as there is a max size limit for logs. You can also search logs. + +>Note: + The max log size for the entire build is 100MB, and 20MB per step. The system stops generating logs once the build size is exceeded. + For large builds, it is easier to filter the logs by single or multiple steps, and then view/download them. + +1. In the **Builds** page, select a build. +1. To view logs online for the selected build, click **Output** in the lower part of the Build page. +1. Optional. Select **Filter Logs** and then select the step or steps for which view/download logs. + Logs are displayed for the selected steps. +1. From either the context menu on the top-right of the toolbar or from the Output pane, select **Download as HTML** or **Download as text**. + The log file is downloaded with the build ID as the filename, including also the step name if the log is for a single step, in the format `'. 
+ + {% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-logs.png" +url="/images/pipeline/monitoring/build-logs.png" +alt="Build log in Codefresh" +caption="Build log in Codefresh" +max-width="60%" +%} + + +### Viewing variables in pipeline builds + +Variables, both system (environment) and custom (user-defined), are injected into pipelines from different sources and at different levels. +The variables actually used by a specific build of the pipeline varies according to the events that triggered the pipeline. +Select a build to view all its variables, and identify their source, and overrides if any. + +1. In the **Builds** page, either select the build and then open the context-menu, or open the context-menu on the right of the build entry. +1. Select **Variables**. + + {% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-variables-view-option.png" +url="/images/pipeline/monitoring/build-variables-view-option.png" +alt="Variables option in context menu of build entry" +caption="Variables option in context menu of build entry" +max-width="70%" +%} + +{:start="3"} +1. If required, click the Sort icon for the group to sort in alphabetical order. +1. To copy the group's variables to the clipboard, click the Copy icon. + + +Here's an example of the list of variables for a pipeline build. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-variables-list.png" +url="/images/pipeline/monitoring/build-variables-list.png" +alt="List of variables in selected build" +caption="List of variables in selected build" +max-width="50%" +%} + +The variables are grouped by granularity, starting with the global project-level variables and ending with the trigger-level variables with the highest granularity: +* Project +* Shared configuration +* Pipeline +* Trigger + +A variable with a strikethrough indicates an override by the same variable in a lower-level group. For rules on precedence and overrides for variables in builds, see [Variables]({{site.baseurl}}/docs/pipelines/variables/). + +>Notes: + * Variables exported across steps with `cf_export` are not identified as `cf-exported` variables in the list. + * Secret-type variables are always masked. + + + +### Reviewing the yaml for the pipeline + +From the step details you can also click on the yaml tab to see the yaml segment for that individual step: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/yaml-from-step.png" +url="/images/pipeline/monitoring/yaml-from-step.png" +alt="Step Yaml" +caption="Step Yaml" +max-width="60%" +%} + +If you want to see the yaml for the whole pipeline, +- Click the *YAML* tab on the bottom left corner without selecting a step first or +- Select the three dots next to the "RESTART" button on the top-right, and click on *Show YAML* + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/view-pipeline-yaml.png" +url="/images/pipeline/monitoring/view-pipeline-yaml.png" +alt="Pipeline Yaml" +caption="Pipeline Yaml" +max-width="60%" +%} + +In both cases you can copy to clipboard the yaml shown using the button at the top left corner. + +### Viewing pipeline metrics + +Codefresh offers several metrics for pipeline steps that allow you to get a better overview on the resources +consumed by your pipeline. 
+ +At the most basic level Codefresh will show some quick metrics while the pipeline is running that include +memory consumed and size of logs: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/quick-pipeline-metrics.png" +url="/images/pipeline/monitoring/quick-pipeline-metrics.png" +alt="Pipeline running metrics" +caption="Pipeline running metrics" +max-width="70%" +%} + +You can then get the memory usage for the whole pipeline by clicking on the metrics tab at the bottom of the screen. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/pipeline-metrics.png" +url="/images/pipeline/monitoring/pipeline-metrics.png" +alt="Pipeline detailed metrics" +caption="Pipeline detailed metrics" +max-width="70%" +%} + + +If you click on an individual step before clicking the *Metrics* tab you will get metrics for that specific step only. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/step-metrics.png" +url="/images/pipeline/monitoring/step-metrics.png" +alt="Step metrics" +caption="Step metrics" +max-width="70%" +%} + + +### Restarting the pipeline + +You can choose to restart any pipeline by clicking the button at the top right corner. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/restart-pipeline.png" +url="/images/pipeline/monitoring/restart-pipeline.png" +alt="Restart a pipeline" +caption="Restart a pipeline" +max-width="70%" +%} + +>It is important to note that "Restart from beginning" will restart a pipeline with the **same** state that it had in its original execution (including the original git commit). If you want to execute a pipeline again with a new state instead, you need to use the *Run* button in the [pipeline editor]({{site.baseurl}}/docs/pipelines/pipelines/#using-the-inline-pipeline-editor) and selecting any of the available [triggers]({{site.baseurl}}/docs/pipelines/triggers/). + + + +If the pipeline has failed, you can choose to restart it only from the failed step and onwards. + +You can also restart from a failed step right from the graphical view: + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/restart-failed.png" +url="/images/pipeline/monitoring/restart-failed.png" +alt="Restart from a failed step" +caption="Restart from a failed step" +max-width="70%" +%} + +>Notice again that restarting a pipeline from a failed step means restarting the pipeline with the **same** state that it had at the point in time (including the original git commit). + +If your pipeline has some flaky steps, you can also use the [retry syntax]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#retrying-a-step) in your yaml instead of restarting them manually each time they fail. + + +## Monitoring pipelines outside the Codefresh UI + +You don't always have to be in the Codefresh UI in order to monitor the status of your builds. + + +### Monitoring pipelines that check Pull Requests + +One of the most +important roles of a CI platform is to automatically update the status of a GIT Pull request with the result +of the respective build. 
+ +{% include +image.html +lightbox="true" +file="/images/getting-started/quick-start-test-pr/auto-build-pr.png" +url="/images/getting-started/quick-start-test-pr/auto-build-pr.png" +alt="Pull Request Status" +caption="Pull Request Status (click image to enlarge)" +max-width="50%" +%} + +If you have setup a [GIT trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/) in Codefresh then by default this happens automatically without any other configuration +for all automated commits (that are coming from webhooks). + +If you start a build manually then by default the git status will **not** be updated (i.e. the result of the pipeline +will not affect the status of Pull request) + +If you don't want this behavior to happen, you can enable the git status update checkbox when you launch a pipeline. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/report-notification-checkbox.png" +url="/images/pipeline/monitoring/report-notification-checkbox.png" +alt="Update git status for pipelines triggered manually " +caption="Update git status for pipelines triggered manually (click image to enlarge)" +max-width="50%" +%} + +This way the pipeline status *will* change the build status even with manual builds. + +The same behavior is also available to the [Codefresh CLI](https://codefresh-io.github.io/cli/pipelines/run-pipeline/). In that case use the parameter `--enable-notifications` +to specify if manually triggering a build will also change the GIT status. + +For open source projects you also have the ability to [trigger builds from external forks]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#support-for-building-pull-requests-from-forks). + +### Viewing pipeline status from text/html files + +Codefresh also supports build badges that allow you to show the +status of a Pipeline in Text files or web pages. + +{% include +image.html +lightbox="true" +file="/images/pipeline/monitoring/build-badge.png" +url="/images/pipeline/monitoring/build-badge.png" +alt="Codefresh build badges" +caption="Codefresh build badges" +max-width="100%" +%} + +See the [build badges page]({{site.baseurl}}/docs/pipelines/build-status/) for more information. + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Test report]({{site.baseurl}}/docs/pipelines/test-reports/) +[Status badges]({{site.baseurl}}/docs/pipelines/build-status/) diff --git a/_docs/pipelines/pipeline-caching.md b/_docs/pipelines/pipeline-caching.md new file mode 100644 index 00000000..5e4e4189 --- /dev/null +++ b/_docs/pipelines/pipeline-caching.md @@ -0,0 +1,314 @@ +--- +title: "Caching in CI pipelines" +description: "Faster builds with Codefresh caching" +group: pipelines +toc: true + +--- + +One of the unique features of Codefresh is the multitude of caching systems that take part in a pipeline, and in particular the caching mechanisms targeted specifically at Docker builds. Most types of caching are completely automatic and require zero configuration in order to activate. Caching is a built-in feature in all Codefresh accounts regardless of pricing tier (even free accounts have all types of caching enabled). 
+ +## Types of caching + +Here is a quick overview of all types of caching used in a Codefresh pipeline: + +{: .table .table-bordered .table-hover} +| Caching mechanism | Activation | Used in | Comments | +| -------------- | ---------------------------- |-------------------------| -------------------------| +| Distributed Docker step/image caching | Automatic | All pipeline [steps]({{site.baseurl}}/docs/pipelines/steps/) | | +| Distributed Docker layer caching | Automatic | Pipeline [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) | Mimics local Docker layer cache| +| Caching from previous built image | Automatic | Pipeline build steps | Distributed version of `--cache-from`| +| Docker registry caching | Automatic | Pipeline build steps | Works for all [connected Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/)| +| Traditional build caching | Automatic/manual | Pipeline [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) | See notes for [parallel builds]({{site.baseurl}}/docs/pipelines/advanced-workflows/)| + +All these caching mechanisms are enabled by default and you can [freely disable them]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) if you encounter any issues with caching. + +Let's see these caches in order and how to use them effectively. + +## Distributed Docker image caching + +This is the simplest mode of caching available. All Codefresh steps [are in fact docker images]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/). Once a pipeline runs for the first time, Codefresh will pull all required images from their registries (either public or private) and will cache them for the next build: + + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/image-caching.png" +url="/images/pipeline/caching/image-caching.png" +alt="Caching pipeline steps" +caption="Caching pipeline steps" +max-width="60%" +%} + +The next time the pipeline runs all images will be fetched from cache. This includes built-in steps (e.g the [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/)), custom steps from [the marketplace](https://codefresh.io/steps/) or your own [dynamic pipeline steps]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#creating-docker-images-dynamically-as-build-tools). + +This cache mechanism is completely automatic and is not user configurable. Some ways that you can affect it are: + +* If you use well known images in your pipeline (such as `alpine`, `node`, `maven` etc) they have more probabilities to be already cached by the Codefresh platform +* Use specific tags for your images (e.g. `alpine:3.9.2` and `maven:3-jdk-11-openj9`) instead of generic ones (e.g `alpine:latest` and `node:buster`) that change all the time +* Using small images in the pipeline will make caching/restoring of pipeline steps much faster. + + +You can see in the [pipeline build logs]({{site.baseurl}}/docs/pipelines/steps/build/) if the images of your steps are found in cache or not. 
Here is an example of a cache hit: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/image-cache-hit.png" +url="/images/pipeline/caching/image-cache-hit.png" +alt="Docker image cache hit" +caption="Docker image cache hit" +max-width="50%" +%} + +and a cache miss: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/image-cache-miss.png" +url="/images/pipeline/caching/image-cache-miss.png" +alt="Docker image cache miss" +caption="Docker image cache miss" +max-width="50%" +%} + +This cache mechanism is applicable to all Codefresh pipelines and steps. + + +## Distributed Docker layer caching + +This type of caching is **only** applicable to [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) and mimics the ways docker layer caching behaves locally on your workstation. + +When you build images locally, Docker will cache intermediate layers making future builds much faster. You can see when caches are used in your build logs. + +{% highlight shell %} +{% raw %} +> docker build . -t my-app +Sending build context to Docker daemon 81.92kB +Step 1/10 : FROM golang:1.12-alpine + ---> 6a17089e5a3a +Step 2/10 : RUN apk add --no-cache git + ---> Using cache + ---> 7b65bc6a6690 +Step 3/10 : WORKDIR /app/go-sample-app + ---> Using cache + ---> 8755d1490fe2 +Step 4/10 : COPY go.mod . + ---> Using cache + ---> 476d868ceddd +Step 5/10 : COPY go.sum . + ---> Using cache + ---> 3239097e9bde +[...] +{% endraw %} +{% endhighlight %} + +In a distributed build environment however, things work much differently as each build node has its own cache. If you run a pipeline on one node and then run a second build on another node everything will be recreated again because (normally) build nodes don't share any cache. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/no-distributed-layer-cache.png" +url="/images/pipeline/caching/no-distributed-layer-cache.png" +alt="Without a distributed docker layer cache" +caption="Without a distributed docker layer cache" +max-width="60%" +%} + +In the example above if you run another build that is picked up by build node 18 all Docker filesystem layers will be recreated again even though they are already present in other nodes. + +Codefresh is one of the few CI/CD solutions that has a *distributed* Docker layer cache. This makes layer caching available to all build nodes. It doesn't matter any more which build node runs which pipeline as all of them are equal regarding their caching capabilities. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/distributed-layer-cache.png" +url="/images/pipeline/caching/distributed-layer-cache.png" +alt="Wit a distributed docker layer cache" +caption="With a distributed docker layer cache" +max-width="60%" +%} + +With the distributed docker layer cache all build nodes are now equal. Any of the available nodes can pick your next pipeline build as all of them have access to all the previous docker filesystem layers. 
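+
+In practice you do not need any special syntax to benefit from this cache. As a minimal sketch, a plain [build step]({{site.baseurl}}/docs/pipelines/steps/build/) such as the one below gets the distributed layer cache automatically; the two opt-out flags are shown only for completeness and are an assumption of this example, so verify them against the build step reference before relying on them:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  build_my_image:
+    title: Building Docker image
+    type: build
+    image_name: my-app-image
+    tag: latest
+    dockerfile: Dockerfile
+    # Both options default to false. Flip them to true only when troubleshooting cache behavior:
+    no_cache: false     # when true, disables the Docker engine layer cache for this build
+    no_cf_cache: false  # when true, disables the Codefresh distributed layer cache for this build
+{% endraw %}
+{% endhighlight %}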
+ +You can see if this cache is used in your [pipeline logs]({{site.baseurl}}/docs/pipelines/steps/build/): + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/distributed-docker-layer-cache.png" +url="/images/pipeline/caching/distributed-docker-layer-cache.png" +alt="Docker layer caching regardless of build node" +caption="Docker layer caching regardless of build node" +max-width="60%" +%} + +Codefresh will also automatically pass the `--cache-from` directive to docker builds with the previous successful build artifacts: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/cache-from.png" +url="/images/pipeline/caching/cache-from.png" +alt="Distributed version of `--cache-from`" +caption="Distributed version of `--cache-from`" +max-width="60%" +%} + +To take advantage of this build cache just follow the official Docker guidelines and best practices such as + +* Download dependencies in a separate docker layer +* Put layers that will not change frequently at the top of dockerfile (e.g. OS libs) +* Put things that will change frequently at the bottom of the dockerfile (e.g. source code) +* Don't use side effects in Dockerfiles + +Basically, if your Dockerfile is already optimized on your local workstation, it should also be optimized for Codefresh. More information can be found in the official documentation: + +* [https://www.docker.com/blog/intro-guide-to-dockerfile-best-practices/](https://www.docker.com/blog/intro-guide-to-dockerfile-best-practices/) +* [https://docs.docker.com/develop/develop-images/dockerfile_best-practices/](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) + +## Docker registry caching + +This is a caching mechanism unique to Codefresh and applicable only to [build steps]({{site.baseurl}}/docs/pipelines/steps/build/) when any of [connected Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) is used. + +Codefresh will check the internal Docker registry *before* a build step and if the exact same image is found (using the image hash), it will skip the build step completely: + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/skip-build.png" +url="/images/pipeline/caching/skip-build.png" +alt="Skipping a previously built Docker image" +caption="Skipping a previously built Docker image" +max-width="60%" +%} + +This is a very effective way to cut down the amount of time needed by pipelines but it obviously works only for Docker images that don't change often (helper images, plugins, build tools etc.) as the deployment docker images will always be different when a new git commit happens in the source code. + +You can take advantage of this mechanism by [not mixing deployment docker images with development docker images](https://codefresh.io/containers/docker-anti-patterns/). The former will change all the time, while the latter should be recreated less often. + +## Traditional build caching + +If you have read the [introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) page you will already be familiar with the shared volume that is automatically mounted on all pipeline steps. This volume is not only used for data exchange between steps of the same pipeline, but is also stored/fetched for each subsequent build as well. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/caching/pipeline-volume-caching.png" +url="/images/pipeline/caching/pipeline-volume-caching.png" +alt="Pipeline workspace caching" +caption="Pipeline workspace caching" +max-width="90%" +%} + +This means that unlike other CI solutions where you have to manually describe what folder you wish to cache, in Codefresh **everything that exists in `/codefresh/volume` and its subfolders is automatically cached between different builds** of the same pipeline. The volume mounting and caching/restoring process is completely automatic. You don't need any configuration about it. + +The main choice that you have is which files to place on the volume. For example, Node.js uses the folder `node_modules` for its dependencies which are placed under the project folder [which is automatically placed under the volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code). So all contents of `node_modules` will be cached by default without any further action on your part. + +>Note that if you are using [Codefresh on-prem]({{site.baseurl}}/docs/installation/codefresh-on-prem/), this kind of caching is not available for the built-in runtime and you need to use the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) +with your own runtime to activate volume caching. + +The simplest way to see this caching mechanism in action is this pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + write_sample_file: + title: Writing to shared volume + image: alpine:3.10.3 + commands: + - date >> /codefresh/volume/sample.txt + read_sample_file: + title: Reading from shared volume + image: alpine:3.10.3 + commands: + - cat /codefresh/volume/sample.txt +{% endraw %} +{% endhighlight %} + +If you run this pipeline multiple times you will see multiple entries in the file `sample.txt`. + +>Note that if you run concurrent builds too quickly after one another, the Codefresh Volume will refresh [from scratch]({{site.baseurl}}/docs/pipelines/pipeline-caching/#issues-with-parallel-builds-and-parallel-pipelines) instead of being cached between builds. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/codefresh-shared-volume.png" +url="/images/pipeline/caching/codefresh-shared-volume.png" +alt="Shared volume after 3 builds of the same pipeline" +caption="Shared volume after 3 builds of the same pipeline" +max-width="60%" +%} + +Notice also the complete lack of `volume` directives in the `codefresh.yml` file. The pipeline volume is mounted and cached/restored by Codefresh with no configuration on your part. + +Some important points on this caching mechanism: + +* The volume is handled and managed by Codefresh in a completely transparent manner. You **DO NOT** need any `volume` directives in your pipelines to take advantage of it. The volume is even present in [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) for integration tests. +* On each build the [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) will purge/delete everything that is not placed in `.gitignore`. So make sure that your `.gitignore` files contain all the things that you want to see cached (e.g. `node_modules`) +* If you use the SAAS version of Codefresh, volumes will be reused across all your account pipelines. 
If you use the On-prem or Hybrid version of Codefresh, pipeline volumes can be scoped to different pipelines or triggers as well +* You need at least one build of your pipeline in order for the cache mechanism to take any effect. +* The volume is **NOT available** in [build steps]({{site.baseurl}}/docs/pipelines/steps/build/). This is not a Codefresh limitation. Docker itself [does not allow volumes during builds](https://github.com/moby/moby/issues/14080). There is no folder `/codefresh/volume` inside a Dockerfile for you to access. +* This is the only caching mechanism that is not related to Docker images. So if you compile/package a traditional application with Codefresh that is not packaged as a Docker image this is the only way to get faster builds. + +See also a [full example]({{site.baseurl}}/docs/yaml-examples/examples/shared-volumes-between-builds/) that uses the volume at [https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds](https://github.com/codefreshdemo/cf-example-shared-volumes-between-builds). + +### Caching folders which are outside your project folder + +By default if you checkout a Git project named `foo`, the source code [is placed under]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) `/codefresh/volume/foo`. This means that with zero configuration the following things are cached: + +* your source code of `foo` project +* all dependencies under the project folder (e.g. `foo/node_modules`) +* all project logs, test results that are inside the project module. + +Everything else found in external folders is NOT cached by default. So if you have things in folders such as `/root`, `/tmp/`, `/home/`, `/var/` that you need to cache you need to manually copy them to the volume. + +In practice, this means that you need to look at the documentation of your build system and test framework and make sure that all folders you want cached are placed under the Codefresh volume. This is a typical pattern with Java applications. + + * For Maven use `mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package` as shown in the [example]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/). + * For Gradle use `gradle -g /codefresh/volume/.gradle -Dmaven.repo.local=/codefresh/volume/m2` as explained in the [example]({{site.baseurl}}/docs/learn-by-example/java/gradle/). + * For SBT use `-Dsbt.ivy.home=/codefresh/volume/ivy_cache`. + * For Pip use `pip install -r requirements.txt --cache-dir=/codefresh/volume/pip-cache` as shown in the [example]({{site.baseurl}}/docs/learn-by-example/python/django/) + * For Golang pass an environment variable `GOPATH=/codefresh/volume/go` to the freestyle step that is running go commands + * For Rust pass an environment variable `CARGO_HOME=/codefresh/volume/cargo` to the freestyle step that is running rust/cargo commands + + This is only needed for traditional applications that are not dockerized. If you already use Docker containers the previous caching mechanisms are already enough. + +### Issues with parallel builds and parallel pipelines + +Codefresh supports two forms of parallelism, parallel steps within the same pipeline and parallel pipelines (as well as concurrent builds). + +All parallel steps inside the same pipeline use the same volume. Codefresh [does not perform any conflict detection in that case]({{site.baseurl}}/docs/pipelines/advanced-workflows/#shared-codefresh-volume-and-race-conditions). 
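+
+For example, here is a minimal sketch of parallel steps that share the volume; since there is no conflict detection, each step deliberately writes to its own file to avoid a race condition:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  parallel_reports:
+    type: parallel
+    steps:
+      write_report_a:
+        image: alpine:3.10.3
+        commands:
+          # Each parallel step writes to a different file on the shared volume
+          - echo "result A" > /codefresh/volume/report-a.txt
+      write_report_b:
+        image: alpine:3.10.3
+        commands:
+          - echo "result B" > /codefresh/volume/report-b.txt
+  collect_reports:
+    title: Reading both reports
+    image: alpine:3.10.3
+    commands:
+      - cat /codefresh/volume/report-a.txt /codefresh/volume/report-b.txt
+{% endraw %}
+{% endhighlight %}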
+ +For concurrent builds of the same pipeline, notice that if you make too many commits very fast (triggering a second build while the previous one is still running), Codefresh will allocate a brand new volume for the subsequent builds. This will force all builds to start with a clean shared volume, resulting in longer build times. Be sure to set your [build termination settings]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-settings) correctly. + +{% include image.html +lightbox="true" +file="/images/pipeline/caching/concurrent-build-caching.png" +url="/images/pipeline/caching/concurrent-build-caching.png" +alt="Concurrent build caching" +caption="Concurrent build caching" +max-width="80%" +%} + +The diagram above shows the following sequence of events: + +1. The first build of a pipeline is triggered. Codefresh allocates a brand new volume and automatically mounts is as a workspace at `/codefresh/volume`. +1. The first build runs and stores artifacts on the volume +1. The first build finishes. Codefresh stores the volume in the cache +1. A second build is triggered for the same pipeline and same git branch. Codefresh sees that there is already a volume in the cache and passes it to the second build. The second build correctly finds all artifacts in the cache +1. *Before the second build finishes*, a third build is triggered. +1. The pipeline volume is still locked by the second build and Codefresh cannot use it in the third build. Codefresh allocates a **brand new volume** that has no artifacts at all and passes it to the third build +1. The second build finishes and its volume is saved into cache +1. The third build finishes and its volume is saved into cache *overwriting* the volume of the second build. +1. If a fourth build starts it will use the volume from the third build since this was the last saved volume. + + + +## Codefresh cache size and eviction policy + +If you use the SAAS version of Codefresh, then you don't have any control of cache policies. +The SAAS version is fully controlled by Codefresh personnel and the cache policies in place might clear caches sooner than you think. + +If you run a pipeline very infrequently it is possible to suffer many cache misses. If you also use obscure Docker images you might see them downloaded again and again. + +If you run the [hybrid or on-prem versions]({{site.baseurl}}/docs/enterprise/installation-security/) of Codefresh, then your system administrator is responsible for fine-tuning the cache settings. 
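+
+To tie the traditional build caching guidance together, here is a minimal sketch for a Maven project (the repository name is a placeholder) that redirects the dependency cache onto the shared volume so it survives between builds:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  main_clone:
+    title: Cloning main repository...
+    type: git-clone
+    repo: my-github-user/my-java-app
+    revision: master
+    git: github
+  package_jar:
+    title: Packaging application
+    image: maven:3.5.2-jdk-8-alpine
+    commands:
+      # The local Maven repository lives on the shared volume, so it is cached between builds
+      - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package
+{% endraw %}
+{% endhighlight %}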
+ +## Related articles +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Parallel pipelines]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/pipelines/pipelines.md b/_docs/pipelines/pipelines.md new file mode 100644 index 00000000..443383d1 --- /dev/null +++ b/_docs/pipelines/pipelines.md @@ -0,0 +1,330 @@ +--- +title: "Creating CI pipelines" +description: "Define CI pipelines in Codefresh" +group: pipelines +redirect_from: + - /docs/pipeline + - /docs/pipeline/ + - /docs/pipelines + - /docs/pipelines/ + - /docs/pipelines/introduction/ + - /docs/pipelines/introduction + - /docs/inline-yaml-editing + - /docs/inline-yaml-editing/ +toc: true +--- + +Before creating a pipeline, make sure you are familiar with the theory behind [Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/). + +## Pipeline concepts + +The aim of Codefresh pipelines is to have re-usable sequences of steps that can be used for different applications (or micro-services) via the use of Git triggers. + +The main concepts are shown below: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/concepts.png" +url="/images/pipeline/create/concepts.png" +alt="Pipeline concepts" +caption="Pipeline concepts" +max-width="60%" +%} + +* **Projects**: The top-level concept in Codefresh CI/CD. Projects are used to group related CI pipelines. In most cases, a single project will be a single application that itself contains many microservices. You are free to use projects as you see fit. For example, you could create a project for a specific Kubernetes cluster or for a specific team/department. + +* **Pipelines**: Each project can have multiple pipelines. Pipelines that belong to a single project can be managed as a unit. You can also create a new pipeline by copying an existing pipeline. Notice that unlike other CI solutions, a pipeline in Codefresh is **NOT** tied to a specific Git repository. You should try to make your pipelines generic enough so that they can be reused for similar applications even when they exist in different Git repositories (a fairly typical setup for microservices). + +* **Pipeline steps**: Each pipeline has a definition that defines the [pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) that are executed each time the pipeline is triggered. The definition of a pipeline is described in a special [codefresh.yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) file. The `codefresh.yml` file can be fetched from the same repository as that of the source code, from a completely different repository, or even defined in-place in the Codefresh pipeline editor. Again, notice you can have a pipeline that checks out its source code from Git repository A, but actually defines its steps in a `codefresh.yml` file that is fetched from Git repository B. + +* **Triggers**: A pipeline can have zero, one, or many [triggers]({{site.baseurl}}/docs/pipelines/triggers/). Triggers are the linking medium between a pipeline and a Git repository. Codefresh supports several kinds of triggers such as Git, Cron, and Docker push triggers. +Triggers that happen with Git webhooks can come from the same Git repository that contains the git code, **OR**, a completely different repository. 
You can have a pipeline with multiple triggers to be executed when a code change happens to any of them. + +With these basic building blocks, you can define many complex workflows. In particular, it is very easy in Codefresh to create a scenario where: + +1. A pipeline is launched because a trigger exists for Git repository A +1. The pipeline reads its `codefresh.yml` file from Git repository B +1. The pipeline clones source code from Git repository C (and starts packaging/compiling it) + +Of course, you can also have a simpler scenario where the trigger, the pipeline steps and the source code of the application are all defined for the same Git repository. + + +## Creating a pipeline + +You can create new projects by clicking on *Projects* in the left sidebar and then selecting the *New Project* button on the top right corner. A dialog will appear that will ask you for the project name and optional tags that you can use for [access control]({{site.baseurl}}/docs/enterprise/access-control/). + +Once you are inside the project view you can start editing pipelines with a UI environment that works similar to a traditional IDE. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/pipeline-manager.png" +url="/images/pipeline/create/pipeline-manager.png" +alt="Pipeline manager" +caption="Pipeline manager" +max-width="70%" +%} + +1. On the top left you can see your current project. You can also change it by clicking on the drop-down on the top left corner. + +1. On the left side of the screen you can see all pipelines that currently belong to this project. Click on each one to edit it. +On the bottom part of this panel the *New pipeline* button allows you to create a new pipeline on the same project either from scratch +or by copying an existing one from the same project or a completely different project. + +1. The name of the currently edited pipeline is shown at the top of the window. + +1. The main window shows the definition of the current pipeline. The screenshot shows the inline editor but pipelines can also be defined from external files (checked into source control) as explained later. + +1. The right part of the window shows extra settings for this pipeline such as [predefined steps]({{site.baseurl}}/docs/codefresh-yaml/steps/), [triggers]({{site.baseurl}}/docs/pipelines/triggers/) and launch variables/parameters. + + + + +### Using the Inline pipeline editor + +When first creating a pipeline you will see an inline editor that allows you to define the [pipeline yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) right there in the Codefresh UI. This is great when you are starting a new project because it offers you really quick feedback. You can edit the yml steps, run a build, edit again, run a build and so on. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/inline-editor.png" +url="/images/pipeline/create/inline-editor.png" +alt="Inline Pipeline editor" +caption="Inline Pipeline editor" +max-width="60%" +%} + +On the top right of the panel you have additional controls: + +* The *import* button allows you to bring a `codefresh.yml` from your local workstation into the editor +* The *comment* button allows you to quickly comment/uncomment the currently selected text. The hotkey `Ctrl-/` also performs the same action +* The *formatting* button enriches the editor with special symbols for line breaks, spaces and tabs. 
This allows you to easily fix common formatting errors
+* The *copy* button quickly copies the **whole** pipeline text to your clipboard
+* You can use `Ctrl-]` and `Ctrl-[` to change indentation of the current line (use the Command key instead on macOS)
+
+
+Notice that in the editor you can expand/collapse individual yaml blocks using the arrow triangles on the left of each block. The initial pipeline presented in the editor is suggested by Codefresh according to the contents of your Git repository.
+
+> You can also see the suggested Codefresh pipeline for any public git repository by using the [analyze option](https://codefresh-io.github.io/cli/analyzer/){:target="\_blank"} of the Codefresh CLI.
+
+
+## Loading codefresh.yml from Version Control
+
+Working with the inline editor is very convenient in the beginning, but it makes your pipeline definition only exist within the Codefresh UI and therefore goes against the basic principles of [infrastructure as code](https://en.wikipedia.org/wiki/Infrastructure_as_Code){:target="\_blank"}. Once you are happy with how your pipeline works, you should commit it to a Git repository (which can be the same one that has the source code of the application or a completely different one).
+
+You can click on the *Inline YAML* header and switch it to *Use YAML from URL* or *Use YAML from Repository*.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/create/pipeline-from-internal-repo.png"
+url="/images/pipeline/create/pipeline-from-internal-repo.png"
+alt="Pipeline from internal repo"
+caption="Pipeline from internal repo"
+max-width="60%"
+%}
+
+You can then copy and paste a URL to a raw Codefresh YAML file. This allows you to load a Codefresh YAML file from any public URL. Notice that a raw URL is needed in the case of GitHub.
+
+As an example, instead of using `https://github.com/codefresh-contrib/example-voting-app/blob/master/codefresh.yml` you should enter `https://raw.githubusercontent.com/codefresh-contrib/example-voting-app/master/codefresh.yml`
+
+## Pipeline settings
+
+Once you create your pipeline you can also click on the top tab called *Settings* for some extra parameters.
+
+### General
+
+- **Pipeline Name**: The name of your pipeline (useful for working with the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"})
+- **Pipeline ID**: The ID of your pipeline (useful for working with the [Codefresh CLI](https://codefresh-io.github.io/cli/){:target="\_blank"})
+  > When working with the Codefresh CLI, the Pipeline Name and ID are interchangeable.
+- **Pipeline Description**: Free-text description of the pipeline.
+- **Pipeline Tags**: One or more tags used for [access control]({{site.baseurl}}/docs/administration/access-control/)
+- **Public Build Logs**: If enabled, [users without a Codefresh account]({{site.baseurl}}/docs/pipelines/build-status/#public-build-logs) can view the builds of this pipeline.
+- **Template**: Convert this pipeline to a template (see the next section for details on templates).
+- **Badges**: Simple images that show you the last [build status]({{site.baseurl}}/docs/pipelines/build-status/).
+
+### Policies
+
+- **Pipeline Concurrency**: The maximum number of concurrent builds (0-14 or unlimited). Set the concurrency when your pipeline has only one trigger.
+  > A Pipeline Concurrency of **0** freezes execution of the pipeline, switching it to maintenance mode. Use this concurrency setting to modify existing pipelines and freeze execution until you complete the changes.
+- **Trigger Concurrency**: The maximum number of concurrent builds per trigger (1-15 or unlimited). Define the trigger concurrency when your pipeline has multiple triggers.
+- **Branch Concurrency**: The maximum number of concurrent builds per branch (1-15 or unlimited). Define this when your pipeline can build different branches.
+- **Build Termination**: Options that determine when a build from the pipeline should terminate:
+  - Once a build is created terminate previous builds from the same branch
+  - Once a build is created terminate previous builds only from a specific branch (name matches a regular expression)
+  - Once a build is created, terminate all other running builds
+  - Once a build is terminated, terminate all child builds initiated from it
+- **Pending approval volume**: Choose what happens with the [pipeline volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) when a pipeline is waiting for [approval]({{site.baseurl}}/docs/pipelines/steps/approval/#keeping-the-shared-volume-after-an-approval):
+  - Keep the volume available
+  - Discard the volume
+  - Honor the option defined globally in your Codefresh account
+- **Pending approval concurrency limit effect**: Determines if a build that is pending approval [counts against]({{site.baseurl}}/docs/pipelines/steps/approval/#define-concurrency-limits) the concurrency limits or not:
+  - Builds in pending approval will **not** be counted when determining the concurrency limit for a pipeline
+  - Builds in pending approval will **be** counted when determining the concurrency limit for a pipeline
+  - Honor the option defined globally in your Codefresh account
+
+The **Pipeline and Trigger Concurrency** limits are very important as they allow you to define how many instances of a pipeline can run in parallel when multiple commits or multiple pull requests take place.
+
+> Notice that these limits are *unrelated* to [parallelism within a single pipeline]({{site.baseurl}}/docs/pipelines/advanced-workflows/).
+
+Some common scenarios are:
+
+* a pipeline that uses a shared resource such as a database or queue and you want to limit how many pipelines can access it
+* a pipeline that deploys to a single production environment (in most cases you only want one active pipeline touching production)
+
+The **Build Termination** settings are useful for pipelines where you commit too fast (i.e. faster than the actual runtime of the pipeline).
+All these settings allow you to reduce the number of running builds when too many triggers are launched at the same time.
+You will find them very useful in cases where too many developers are performing small commits and builds take a long time to finish (i.e. a build takes 10 minutes to finish and developers perform multiple pushes every 2 minutes).
+
+Some common scenarios are:
+
+* You are interested only in the latest commit of a branch. If pipelines from earlier commits are still running you want to terminate them.
+* You don't want to wait for child pipelines to finish (i.e. when a pipeline calls another pipeline) or when a new build starts for a parent pipeline.
+
+For the volume behavior during approvals, notice that if [you keep the volume available]({{site.baseurl}}/docs/pipelines/steps/approval/#keeping-the-shared-volume-after-an-approval) on the pipeline while it is waiting for approval, it will still count as "running" against your pricing tier limit.
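+
+To make the two pending-approval settings above more concrete, here is a minimal sketch of a pipeline that pauses at an [approval step]({{site.baseurl}}/docs/pipelines/steps/approval/). While the build waits at `wait_for_ok`, the policies described above decide whether its volume is kept and whether it counts against the concurrency limits:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  run_tests:
+    title: Running tests
+    image: node:11
+    commands:
+      - npm test
+  wait_for_ok:
+    title: Waiting for approval before deploying
+    type: pending-approval
+  deploy:
+    title: Deploying
+    image: alpine:latest
+    commands:
+      - echo "deploying..."
+{% endraw %}
+{% endhighlight %}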
+ +### External resources + +In a big organization you might have some reusable scripts or other resources (such as Dockerfiles) that you want to use in multiple pipelines. Instead of fetching them manually in freestyle steps you can simply define them as *external resources*. When a pipeline runs, Codefresh will fetch them automatically and once the pipeline starts the files/folders will already be available in the paths that you define. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/external-resources.png" +url="/images/pipeline/create/external-resources.png" +alt="Bringing external resources into a pipeline" +caption="Bringing external resources into a pipeline" +max-width="80%" +%} + +Currently Codefresh supports the automatic fetching of files or folders from another Git repository. To create an external resource click the *Add Resource* button and choose: + +* The Git repository that contains the files/folder you wish to bring in the pipeline workspace +* The branch from the Git repository that contains the files/folders you wish to bring in the pipeline workspace +* The source folder in the GIT repo (use relative path) +* The target folder in the pipeline workspace where the file folder will be copied to (use absolute path) + +Once the pipeline starts, all files will be available to all freestyle steps in the paths mentioned in the target folder field. +You can define multiple external resources in a single pipeline. + +### Runtime + +- **Runtime Environment**: (by default this is set to SaaS) +- **Runtime OS**: (by default this is set to Linux) +- **Resources Size**: + - Small (recommended for 1-2 concurrent steps) + - Medium (recommended 3-4 steps) + - Large (recommended 5-6 steps) + +#### Set disk space for pipeline builds +Set the disk space you need for the pipeline's build volume. Configuring the disk space per pipeline build volume prevents out-of-space scenarios that lead to failed builds. The disk space set for the pipeline is inherited by all the builds run for the pipeline. + +Codefresh calculates the available range according to the disk size, and automatically sets the disk space for the build volume to 70% of the total disk space. You can either retain the default allocation or change as needed. + +>You can also configure the disk space for a [specific trigger]({{site.baseurl}}/docs/pipelines/triggers/git-triggers/#set-minimum-disk-space-for-build-volume-by-trigger) used by the pipeline or for a specific run, and override what's set for the pipeline. + +1. Select the pipeline for which to set the disk space. +1. Select **Settings**, and then **Runtime**. +1. Enable **Set minimum required disk space** and either retain the default displayed or change as needed. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/set-build-disk-space.png" +url="/images/pipeline/create/set-build-disk-space.png" +alt="Set disk space for pipeline builds" +caption="Set disk space for pipeline builds" +max-width="60%" +%} + + +## Using Pipeline Templates + +Codefresh also supports the creation of pipeline "templates", which are blueprints for creating new pipelines. +To enable the creation of pipelines from templates first visit the global pipeline configuration at [https://g.codefresh.io/account-admin/account-conf/pipeline-settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings){:target="\_blank"} and toggle the *Enable Pipeline Templates* button. 
+ +The easiest way to create a new template is by clicking the "3 dots menu" on the pipeline name: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/create-template-menu.png" +url="/images/pipeline/create/create-template-menu.png" +alt="Create template from pipeline" +caption="Create template from pipeline" +max-width="30%" +%} + +From the dialog you can select if you want to copy this pipeline as a brand new template, or simply convert the pipeline itself to a template: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/template-dialog.png" +url="/images/pipeline/create/template-dialog.png" +alt="Template options" +caption="Template options" +max-width="80%" +%} + +Once the template is created, you can edit it like any other pipeline. Pipeline templates are marked with the `template` tag and also have a special mark in the pipeline menu: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/template-tag.png" +url="/images/pipeline/create/template-tag.png" +alt="Identify pipelines used as templates" +caption="Identify pipelines used as templates" +max-width="90%" +%} + +Now when you create a new pipeline, you can also select which pipeline template will be used as an initial pipeline definition: + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/use-template.png" +url="/images/pipeline/create/use-template.png" +alt="Using a template" +caption="Using a template" +max-width="70%" +%} + +>Notice that templates only take effect during pipeline creation. Changing a template afterwards, has no effect on pipelines that are already created from it. + +You can also quickly convert a pipeline to a template, by visiting the pipeline settings and clicking the *template* button under the *General* tab. + + +## Pipelines that do not belong to any project + +Although we recommend adding all your pipelines to a project, this is not a hard requirement. You can create pipelines that do not belong to a project from the *Pipelines* section on the left sidebar. +If you have a Codefresh account created before May 2019 you might already have several pipelines that are like this. + +If you change your mind, you can also add detached pipelines (i.e. pipelines that are not part of a project) manually from the 3-dot menu that is found on the right of each pipeline. + +{% include +image.html +lightbox="true" +file="/images/pipeline/create/add-pipeline-to-project.png" +url="/images/pipeline/create/add-pipeline-to-project.png" +alt="Changing the project of a pipeline" +caption="Changing the project of a pipeline" +max-width="90%" +%} + +Pipelines that belong to a project will mention it below their name so it is very easy to understand which pipelines belong to a project and which do not. 
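+
+As a quick reference, here is the kind of minimal `codefresh.yml` you could paste into the inline editor as a starting point when creating a new pipeline (a bare-bones sketch; the repository and image names are placeholders):
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  main_clone:
+    title: Cloning main repository...
+    type: git-clone
+    repo: my-github-user/my-app
+    revision: '${{CF_BRANCH}}'
+    git: github
+  build_image:
+    title: Building Docker image
+    type: build
+    image_name: my-app
+    tag: latest
+    dockerfile: Dockerfile
+{% endraw %}
+{% endhighlight %}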
+ + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[External Docker Registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) +[YAML Examples]({{site.baseurl}}/docs/yaml-examples/examples/) + + + + + diff --git a/_docs/pipelines/post-step-operations.md b/_docs/pipelines/post-step-operations.md new file mode 100644 index 00000000..9e259da9 --- /dev/null +++ b/_docs/pipelines/post-step-operations.md @@ -0,0 +1,115 @@ +--- +title: "Post-Step Operations" +description: "Annotate your builds and run extra steps" +group: codefresh-yaml +redirect_from: + - /docs/post-step-operations/ +toc: true +--- +Post-step operations are a set of optional predefined processes that can be configured on any step. These operations will be executed once the step has completed. The post-step operations allow you to annotate your builds, images and pipelines with extra metadata or run other steps. + + +## Result Aware Post-Step Operations +You may execute post-step operations conditionally, based on the outcome of the step itself. + +To execute operations only when the step has completed successfully, use `on_success`: + + +{% highlight yaml %} +step_name: + ... + on_success: + ... +{% endhighlight %} + +To execute operations only when the step has failed, use `on_fail`: + + +{% highlight yaml %} +step_name: + ... + on_fail: + ... +{% endhighlight %} + +## Result Agnostic Post-Step Operations +You may execute post-step operations regardless of the outcome of the step itself. + +To execute operations regardless of the result, use `on_finish`: + + +{% highlight yaml %} +step_name: + ... + on_finish: + ... +{% endhighlight %} + +## Available Post-Step Operations + +- [Image Metadata]({{site.baseurl}}/docs/docker-registries/metadata-annotations/) +- [Custom Annotations]({{site.baseurl}}/docs/codefresh-yaml/annotations/) +- [Hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/) + +## Example + +Marking a Docker image with the results of unit tests: + +{% highlight yaml %} +{% raw %} +build_step: + title: Building My Docker image + type: build + image_name: my-app-image + tag: 1.0.1 + dockerfile: Dockerfile +run_tests: + title: Running unit tests + image: ${{build_step}} + commands: + - npm install + - npm run test + on_success: # Execute only once the step succeeded + metadata: + set: + - ${{build_step.imageId}}: + - unit_tests: passed +{% endraw %} +{% endhighlight %} + +## Running other steps + +If you want to run another step in the pipeline when another step fails or succeeds you need to use [conditional execution of steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) and the `fail_fast` property. You can also use [step hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/) for dedicated post step actions. + +{% highlight yaml %} +{% raw %} +run_tests: + title: Running unit tests + image: node:11 + fail_fast: false + commands: + - npm install + - npm run test +print_error_message: + image: alpine:latest + title: Marking pipeline status + commands: + - echo "Unit tests failed" + when: + condition: + all: + myCondition: run_tests.result == 'failure' +{% endraw %} +{% endhighlight %} + +In this example the step `print_error_message` will only run if step `run_tests` has failed. + +See also [advanced workflows]({{site.baseurl}}/docs/codefresh-yaml/advanced-workflows/#single-step-dependencies) and [Pipeline/Step hooks]({{site.baseurl}}/docs/codefresh-yaml/hooks/). 
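+
+For completeness, here is a sketch of a result-agnostic operation: an `on_finish` block that annotates the build regardless of how the step ended (the annotation name and value below are placeholders; see the Custom Annotations page for the full syntax):
+
+{% highlight yaml %}
+{% raw %}
+run_tests:
+  title: Running unit tests
+  image: node:11
+  commands:
+    - npm install
+    - npm run test
+  on_finish: # Executes whether the step succeeded or failed
+    annotations:
+      set:
+        - entity_type: build
+          annotations:
+            - test_run_completed: true
+{% endraw %}
+{% endhighlight %}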
+ +## Related articles +[Conditional execution of steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) +[Working directories]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Annotations in CI pipelines]({{site.baseurl}}/docs/pipelines/annotations/) +[Hooks in CI pipelines]({{site.baseurl}}/docs/pipelines/hooks/) + + diff --git a/_docs/pipelines/running-pipelines-locally.md b/_docs/pipelines/running-pipelines-locally.md new file mode 100644 index 00000000..3c513987 --- /dev/null +++ b/_docs/pipelines/running-pipelines-locally.md @@ -0,0 +1,124 @@ +--- +title: "Running pipelines locally" +description: "How to run Codefresh pipelines on your workstation" +group: configure-ci-cd-pipeline +toc: true +redirect_from: + - /docs/troubleshooting/common-issues/debugging-codefresh-builds-locally/ + - /docs/troubleshooting/common-issues/access-and-debug-the-pipeline-volume-image/ +--- + +Codefresh can run your pipelines locally. This is very handy when you need to debug a pipeline, or when you want to do quick changes to the [codefresh.yml file]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) with the fastest turn-around time possible. + +## Prerequisites + +You need to have Docker installed on your local workstation. You can follow the [official instructions](https://docs.docker.com/install/) to install it. Notice that if you use Linux, the Docker version offered by your native +package manager is not always the latest version. + +Once docker is installed, check that it runs correctly with: + +``` +docker run hello-world +``` + +You should get a short welcome message. + +>At the time of writing local builds can only run on Linux and Mac workstations. We are working to remove this limitation and allow developers with Windows machines to also run Codefresh pipelines locally. + +Then install the [open-source Codefresh CLI](https://codefresh-io.github.io/cli/installation/) and [setup authentication](https://codefresh-io.github.io/cli/getting-started/) with your Codefresh account. + +Once this is done check that your account is locally accessible by running + +``` +codefresh get pipelines +``` + +You should see a long list with your pipelines on the terminal output. + +## Running a pipeline locally + +The Codefresh Command Line Interface (CLI) comes with a [run parameter](https://codefresh-io.github.io/cli/pipelines/run-pipeline/) that allows you to trigger pipelines externally (outside the Codefresh UI). + +Normally, if you run a pipeline this way the CLI will just trigger it remotely (the pipeline itself will still run in the Codefresh infrastructure). + +You can pass however the `--local` option, and this will instruct the CLI to automatically: + +1. Download the Codefresh build engine locally to your workstation (which itself is a docker image at [codefresh/engine](https://hub.docker.com/r/codefresh/engine)) +1. Run the build locally using the Codefresh engine on your workstation +1. Print all build logs to your terminal + +Note that the engine has transparent network access to all the other settings in your Codefresh account and therefore will work exactly the same way as if it was run on Codefresh infrastructure (e.g. 
use the connected Docker registries you have set up in the UI).
+
+Here is a full example:
+
+```
+codefresh run francisco-codefresh/jan_19/my-basic-pipeline --local -b master -t my-trigger
+```
+
+
+
+### Keeping the pipeline volume in the local workstation
+
+If you are familiar with
+[how Codefresh pipelines work]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) you should know about the unique docker volume that is automatically shared between all pipeline steps.
+
+This volume (which also includes the project folder) makes data sharing between all steps very easy (e.g. things such as test reports or binary dependencies).
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/introduction/codefresh-volume.png"
+url="/images/pipeline/introduction/codefresh-volume.png"
+alt="Codefresh volume"
+caption="All steps share the same volume"
+max-width="80%"
+%}
+
+By default, if you run a Codefresh pipeline locally, this shared volume will automatically be discarded at the end of the build. You can still keep the volume after the build by adding the `--local-volume` parameter in your [run command](https://codefresh-io.github.io/cli/pipelines/run-pipeline/). Here is an example:
+
+```
+codefresh run francisco-codefresh/jan_19/my-basic-pipeline --local --local-volume -b master -t my-trigger
+```
+
+
+Once the build runs you will see in your terminal the path that holds the contents of the volume:
+
+```
+[...build logs...]
+Using /Users/fcocozza/.Codefresh/francisco-codefresh/jan_19/my-basic-pipeline as a local volume.
+[...more build logs]
+```
+
+After the build has finished you can freely explore this folder in your filesystem with any file manager.
+
+```
+$ ls -alh /Users/fcocozza/.Codefresh/francisco-codefresh/jan_19/my-basic-pipeline/
+total 16
+drwxr-xr-x 5 fcocozza staff 160B Jan 14 12:52 .
+drwxr-xr-x 3 fcocozza staff 96B Jan 14 12:52 ..
+-rwxr-xr-x 1 fcocozza staff 388B Jan 14 12:52 cf_export
+-rw-rw-r-- 1 fcocozza staff 189B Jan 14 12:52 env_vars_to_export
+drwxr-xr-x 5 fcocozza staff 160B Jan 14 12:52 jan_19
+```
+This way you can verify that the pipeline has access to the data you think it should have.
+
+
+### Using a custom codefresh.yml file
+
+The ultimate way to run a pipeline locally is to completely override the `codefresh.yml` file it uses. By default, a pipeline will read its steps from the respective file in git.
+
+You can force it to ignore the git version of the pipeline spec and instead load a custom `codefresh.yml` from your local file-system (which might not even be committed yet).
+
+The extra parameter is `--yaml` in that case.
+Here is a complete example:
+
+```
+codefresh run francisco-codefresh/jan_19/my-basic-pipeline --local --local-volume --yaml=my-codefresh.yml -b master -t my-trigger
+```
+
+When this pipeline runs locally, it will use whatever steps exist in `my-codefresh.yml` instead of the git version. The shared data volume will also be left intact after the build is finished as explained in the previous section.
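+
+The custom file can contain any valid pipeline definition. For instance, here is a minimal `my-codefresh.yml` you might iterate on locally before committing it (a sketch; the step name and commands are arbitrary):
+
+ `my-codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  say_hello:
+    title: Experimenting locally
+    image: alpine:latest
+    commands:
+      # Edit this file and re-run the CLI command above for a fast feedback loop
+      - echo "hello from a local build"
+      - ls /codefresh/volume
+{% endraw %}
+{% endhighlight %}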
+ +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Introduction to Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines) diff --git a/_docs/pipelines/service-containers.md b/_docs/pipelines/service-containers.md new file mode 100644 index 00000000..68f286f1 --- /dev/null +++ b/_docs/pipelines/service-containers.md @@ -0,0 +1,570 @@ +--- +title: "Service containers in CI pipelines" +description: "How to use sidecar services in your pipelines" +group: pipelines +toc: true +--- + +Sometimes you wish to run sidecar containers in a pipeline that offer additional services for your builds. The most common scenario is launching services such as databases in order to accommodate [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). Or you might wish to launch the application itself in order to run integration tests **against** it as part of the pipeline. + +>Note that while [composition steps]({{site.baseurl}}/docs/codefresh-yaml/steps/composition/) are still supported, the recommended way to run integrations tests going forward is with service containers. The underlying implementation is shared so check the composition documentation page for more available options +and properties. + +Codefresh includes a handy mechanism (based on Docker compose) that can help you run sidecar containers along your main pipeline. Here is a very simple example. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +services: + name: my_database + composition: + my-redis-db-host: + image: redis:latest + ports: + - 6379 +steps: + my_integration_tests: + image: my-app-image + title: Running integration tests + commands: + - npm run test + services: + - my_database +{% endraw %} +{% endhighlight %} + +This pipeline will run integration tests during the freestyle step called `my_integration_tests` and at that point a Redis instance will be available at hostname `my-redis-db-host` and port 6379. Note how in this example, the service container is placed at the root of the pipeline (as opposed to inside a specific step). This ensures that the Redis instance is running for [the duration of the pipeline]({{site.baseurl}}/docs/codefresh-yaml/service-containers/#running-services-for-the-duration-of-the-pipeline). + +>Service Containers are based on Docker Compose. This document does not have the complete list of available options available. Please refer to Docker Compose versions [2](https://docs.docker.com/compose/compose-file/compose-file-v2/) and [3](https://docs.docker.com/compose/compose-file/), but not point releases such as 2.1. + + +## Viewing Service containers + +The service containers have their own output tab in Codefresh UI + +{% include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/services/services-tab.png" + url="/images//pipeline/codefresh-yaml/services/services-tab.png" + alt="Output tab from extra services" + caption="Output tab from extra services" + max-width="100%" + %} + +This way it is very easy to differentiate between the output logs of the step itself and its supporting container services. + + +## Launching multiple sidecar containers + +Like Docker compose it is possible to launch multiple services this way. For example, let's say that a Java application needs both Redis and MongoDB during integration tests. 
Here is the respective pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +services: + name: my_extra_services + composition: + my-redis-db-host: + image: redis:latest + ports: + - 6379 + my-mongo-db-host: + image: mongo:latest + ports: + - 27017 +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-java-app" + git: github + revision: "master" + my_tests: + image: maven:3.5.2-jdk-8-alpine + title: "Running Integration tests" + commands: + - 'mvn integration-test' +{% endraw %} +{% endhighlight %} + +The Redis instance will be available through the networks at `my-redis-db-host:6379` while the MongoDB instance will run at `my-mongo-db-host:27017`. + +Instead of mentioning all your services directly in the YAML file you might also reuse an existing composition you have already defined in Codefresh by mentioning it by name. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +services: + name: my_extra_services + composition: redis_and_mongo +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-java-app" + revision: "master" + git: github + my_tests: + image: maven:3.5.2-jdk-8-alpine + title: "Unit tests" + commands: + - 'mvn integration-test' +{% endraw %} +{% endhighlight %} + +This pipeline mentions an existing composition called `redis_and_mongo`: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/existing-composition.png" +url="/images/pipeline/codefresh-yaml/existing-composition.png" +alt="Using an existing composition" +caption="Using an existing composition" +max-width="70%" +%} + +This makes very easy to reuse compositions that you have already defined for other reasons [in the Codefresh UI](https://codefresh.io/docs/docs/testing/create-composition/). + + +## Running services for the duration of the pipeline + +Notice that unlike compositions, the services defined in the root of the pipeline yaml are present for the **whole** pipeline duration. They are available in all pipeline steps. This can be seen in the following example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +services: + name: my_database + composition: + my-redis-db-host: + image: redis:latest + ports: + - 6379 +steps: + my_first_step: + image: alpine:latest + title: Storing Redis data + commands: + - apk --update add redis + - redis-cli -u redis://my-redis-db-host:6379 -n 0 LPUSH mylist "hello world" + - echo finished + services: + - my_database + my_second_step: + image: alpine:latest + commands: + - echo "Another step in the middle of the pipeline" + my_third_step: + image: alpine:latest + title: Reading Redis data + commands: + - apk --update add redis + - redis-cli -u redis://my-redis-db-host:6379 -n 0 LPOP mylist + services: + - my_database +{% endraw %} +{% endhighlight %} + +This pipeline: + +1. Starts a single Redis instance +1. Saves some data in the first step on the pipeline +1. Runs an unrelated step (that itself is not using the redis instance) +1. Reads the data saved in the third steps + +If you run this pipeline you will see that that data read in the third step of the pipeline was the same one as the data saved in the first step. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/redis-example.png" +url="/images/pipeline/codefresh-yaml/redis-example.png" +alt="Redis read/write example" +caption="Redis read/write example" +max-width="90%" +%} + +This means that you can easily use the extra services in different steps of a single pipeline, without relaunching them each time (which is what happens with composition steps). + +## Using sidecar services in specific steps + +It is important to understand that any services you launch in a pipeline, are sharing its memory. If for example your pipeline has 4GBs of memory and your service (e.g. a mongdb instance) consumes 1GB, then you only have 3GB available for the actual pipeline. + +It is therefore possible to a assign a service to a specific step if you don't wish to have it running for the duration of the whole pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-java-example" + revision: "master" + git: github + build_image: + title: "Building Docker Image" + type: "build" + image_name: "my-java-app" + dockerfile: "Dockerfile" + tag: latest + my_unit_tests: + image: '${{build_image}}' + title: "Unit tests" + commands: + - 'echo start testing my app' + services: + composition: + my_redis_service: + image: 'redis:latest' + ports: + - 6379 + my_integration_tests: + image: '${{build_image}}' + title: "Integration tests" + commands: + - 'echo start testing my app' + services: + composition: + my_mongo_Service: + image: 'mongo:latest' + ports: + - 27017 +{% endraw %} +{% endhighlight %} + +In this pipeline, the Redis instance is only launched during the Unit test step, while the MongoDB service is active only during integration tests. + +You can also use a `docker-compose.yml` file that you might have in your git repository. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-java-example" + revision: "master" + git: github + build_image: + title: "Building Docker Image" + type: "build" + image_name: "my-java-app" + dockerfile: "Dockerfile" + tag: latest + my_unit_tests: + image: '${{build_image}}' + title: "Unit tests" + commands: + - 'echo start testing my app' + services: + composition: + my_redis_service: + image: 'redis:latest' + ports: + - 6379 + my_integration_tests: + image: '${{build_image}}' + title: "Integration tests" + commands: + - 'echo start testing my app' + services: + composition: 'docker-compose.yml' +{% endraw %} +{% endhighlight %} + +Note that in this case the `docker-compose.yml` file must mention [specific images](https://docs.docker.com/compose/compose-file/#image) (and not use [build properties](https://docs.docker.com/compose/compose-file/#build)). + + +## Launching a custom service + +So far all the examples of extra services used predefined docker images (i.e. Redis and Mongo). You are free however to launch any custom docker image you have already created or even the main application of the pipeline. + +This happens by mentioning a build step as a service image. Here is an example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." 
+ repo: "kostis-codefresh/my-back-end" + revision: "master" + git: github + build_image: + title: "Building Docker Image" + type: "build" + image_name: "my-backend-app" + tag: latest + dockerfile: "Dockerfile" + run_integration_tests: + title: Test backend + image: 'my-front-end:latest' + commands: + - 'curl my_backend_app:8080' + - 'echo Backend is up. Starting tests' + - npm run integration-test + services: + composition: + my_backend_app: + image: '${{build_image}}' + ports: + - 8080 +{% endraw %} +{% endhighlight %} + +Here a Dockerfile for a backend application is built on the spot and then is launched as sidecar container in the next step (with a hostname of `my_backend_app`). Notice that the `image` property in the sidecar service actually refers to a [Codefresh variable]({{site.baseurl}}/docs/codefresh-yaml/variables/) that holds the name of the build step. + +We then run a `curl` command against the sidecar container to verify the correct health of the application. This is a great way to run integration tests against multiple micro-services. + + +## Checking readiness of a service + +When you launch multiple services in your pipelines, you don't know exactly when they will start. Maybe they will be ready once you expect them, but maybe they take too long to start. For example if you use a MySQL database in your integration tests, your integration tests need to know that the database is actually up before trying to use it. + +This is the same issue that is present in [vanilla Docker compose](https://docs.docker.com/compose/startup-order/). You can use solutions such as [wait-for-it](https://github.com/vishnubob/wait-for-it) to overcome this limitation, but Codefresh offers a better way in the form of *service readiness*. + +With a readiness block you can guarantee that a sidecar service will be actually up before the pipeline will continue. Here is an example: + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + main_clone: + type: "git-clone" + description: "Cloning main repository..." + repo: "kostis-codefresh/my-back-end" + revision: "master" + git: github + build_image: + title: "Building Docker Image" + type: "build" + image_name: "my-backend-app" + tag: latest + dockerfile: "Dockerfile" + run_integration_tests: + title: Test backend + image: 'my-front-end:latest' + commands: + # Backend is certainly up at this point. + - npm run integration-test + services: + composition: + my_backend_app: + image: '${{build_image}}' + ports: + - 8080 + readiness: + image: 'byrnedo/alpine-curl' + timeoutSeconds: 30 + commands: + - "curl my_backend_app:8080" +{% endraw %} +{% endhighlight %} + + +This is an improvement over the previous example because the healthcheck of the back-end is managed by Codefresh. The added `readiness` block makes sure that the back-end service is actually up before the integration tests start by using a `curl` command to check that `my_backend_app:8080` is up and running. Codefresh will run the commands defined in the `readiness` in a loop until they succeed. You are free to use any of your favorite commands there (ping, curl, nc etc) that check one or more services. We also define a timeout for the healthcheck. The `readiness` block supports the following options: + +* `periodSeconds`: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. +* `timeoutSeconds`: Number of seconds after which the probe times out. Defaults to 10 seconds. Minimum value is 1. 
+* `successThreshold`: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for readiness. Minimum value is 1.
+* `failureThreshold`: Number of consecutive failures after which the readiness check gives up. Defaults to 3. Minimum value is 1.
+
+If you already know how [Kubernetes readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/) work, then these settings will be very familiar to you.
+
+Here is another example where we use the `pg_isready` command to make sure that a PostgreSQL database is ready to accept connections
+before we run the integration tests:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-rails-app"
+    revision: "master"
+    git: github
+  build_image:
+    title: "Building Docker Image"
+    type: "build"
+    image_name: "my-rails-app"
+    tag: "latest"
+    dockerfile: "Dockerfile"
+  run_integration_tests:
+    image: '${{build_image}}'
+    commands:
+      # PostgreSQL is certainly up at this point
+      - rails db:migrate
+      - rails test
+    services:
+      composition:
+        my_postgresql_db:
+          image: postgres:latest
+          ports:
+            - 5432
+      readiness:
+        timeoutSeconds: 30
+        periodSeconds: 15
+        image: 'postgres:latest'
+        commands:
+          - "pg_isready -h my_postgresql_db"
+{% endraw %}
+{% endhighlight %}
+
+In summary, the `readiness` block makes sure that your services are actually up before you use them in a Codefresh pipeline.
+
+## Preloading data to databases
+
+A very common scenario when using databases in integration tests is the need to preload test data into the database.
+While you could do that in a normal pipeline step, sidecar services have a special `setup` block for this purpose. This way you can make sure not only that the database is up (using the `readiness` property explained in the previous section), but also that it is preloaded with the correct data.
+
+To use this capability, add a `setup` block to your pipeline service container:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "kostis-codefresh/my-rails-app"
+    revision: "master"
+    git: github
+  build_image:
+    title: "Building Docker Image"
+    type: "build"
+    image_name: "my-rails-app"
+    tag: "latest"
+    dockerfile: "Dockerfile"
+  run_integration_tests:
+    image: '${{build_image}}'
+    commands:
+      # PostgreSQL is certainly up at this point and has the correct data
+      - rails test
+    services:
+      composition:
+        my_postgresql_db:
+          image: postgres:latest
+          ports:
+            - 5432
+      readiness:
+        timeoutSeconds: 30
+        periodSeconds: 15
+        image: 'postgres:latest'
+        commands:
+          - "pg_isready -h my_postgresql_db"
+      setup:
+        image: 'postgres:latest'
+        commands:
+          - "wget my-staging-server.example.com/testdata/preload.sql"
+          - "psql -h my_postgresql_db < preload.sql"
+{% endraw %}
+{% endhighlight %}
+
+Notice that in this case the sequence of events is the following:
+
+1. Codefresh will launch the container image(s) mentioned in the composition block
+1. The `readiness` block will run until the service image is ready to accept connections
+1. The `setup` block will run and preload data or run any custom commands you have placed in the property
+1. The actual pipeline step will now run with the service container attached in the same network.
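+
+The same pattern is not limited to SQL databases. Here is a minimal sketch that seeds a Redis service before the tests run; the test image, hostname (`my_redis_db`), and key name are illustrative assumptions rather than part of the examples above:
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  run_integration_tests:
+    image: my-test-image
+    commands:
+      # Redis is up and already contains the seeded list at this point
+      - npm run test
+    services:
+      composition:
+        my_redis_db:
+          image: redis:latest
+          ports:
+            - 6379
+      readiness:
+        image: 'redis:latest'
+        timeoutSeconds: 30
+        commands:
+          - "redis-cli -u redis://my_redis_db:6379 ping"
+      setup:
+        image: 'redis:latest'
+        commands:
+          - "redis-cli -u redis://my_redis_db:6379 -n 0 LPUSH test-fixtures 'hello world'"
+{% endraw %}
+{% endhighlight %}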
+ +## Accessing containers via localhost + +Ideally, your application should be able to access other services by other DNS names that are fully configurable (this is a very good practice for [integration tests]({{site.baseurl}}/docs/testing/integration-tests/) as well). + +Sometimes, however, and especially in legacy applications, your application might be hardcoded to look at other services at `localhost`. +In that case, you can use the attribute `shared_host_network: true` on the services definition. Now all linked containers can access each other's services via localhost. +When `composition: ./docker-compose.yml` is used, this parameter is supported only in on-premises and hybrid environments. In cloud environments, for security reasons, this parameter is ignored. + +Here is an example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_step: + image: goodsmileduck/redis-cli + title: Storing Redis data + commands: + - apk add curl + - 'redis-cli -u redis://localhost:6379 -n 0 LPUSH mylist "hello world"' + - 'curl http://localhost:80' + - echo finished + services: + shared_host_network: true + composition: + my_redis_service: + image: 'redis:latest' + my_nginx: + image: nginx +{% endraw %} +{% endhighlight %} + +You can also do the same thing with top level services: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +services: + name: my_database + shared_host_network: true + composition: + my_redis_service: + image: 'redis:latest' + my_nginx: + image: nginx +steps: + my_first_step: + image: goodsmileduck/redis-cli + title: Storing Redis data + commands: + - apk add curl + - 'redis-cli -u redis://localhost:6379 -n 0 LPUSH mylist "hello world"' + - 'curl http://localhost:80' + - echo finished + services: + - my_database +{% endraw %} +{% endhighlight %} + +Note: we do recommend you only use this option as a last resort. You should not hardcode "localhost" as a requirement in your services as +it adds extra constraints with integration tests (and especially with dynamic test environments). + + +## Limitations + +Service containers are not compatible with [custom pipeline steps]({{site.baseurl}}/docs/codefresh-yaml/steps/#limitations-of-custom-plugins). + + + + +## Related articles +[Unit tests]({{site.baseurl}}/docs/testing/unit-tests/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) +[Integration test with database]({{site.baseurl}}/docs/example-catalog/ci-examples/integration-tests-with-database/) +[Creating Compositions]({{site.baseurl}}/docs/on-demand-test-environment/create-composition/) + + + + + + + + diff --git a/_docs/pipelines/stages.md b/_docs/pipelines/stages.md new file mode 100644 index 00000000..42d50c59 --- /dev/null +++ b/_docs/pipelines/stages.md @@ -0,0 +1,195 @@ +--- +title: "Grouping steps in CI pipelines" +description: "Group steps into stages for better visualization" +group: pipelines +toc: true +--- + +With Codefresh you can [create really complex pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) with any number of steps. + +To better visualize the pipeline, you can group several steps into a single _stage_. The _stage_ with the group of steps are displayed as a separate column in the [pipeline view]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/). 
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+url="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+alt="Complex pipeline"
+caption="Complex pipeline"
+max-width="70%"
+%}
+
+In this example, the pipeline has four stages.
+
+## Assigning steps to a stage
+
+Stages are completely optional, and for very small pipelines they are not needed at all.
+By default, all pipeline steps are shown one after the other.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/linear-view.png"
+url="/images/pipeline/codefresh-yaml/stages/linear-view.png"
+alt="Default pipeline view"
+caption="Default pipeline view"
+max-width="50%"
+%}
+
+This view works well for small pipelines, but for a large number of steps it is better to group them into pipeline *stages*, as shown below:
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/example.png"
+url="/images/pipeline/codefresh-yaml/stages/example.png"
+alt="Different pipeline stages"
+caption="Different pipeline stages"
+max-width="80%"
+%}
+
+The number of stages (i.e., columns) and their titles are completely configurable.
+To enable this view, you need to make two modifications to the `codefresh.yml` file.
+
+Here is the skeleton:
+
+ `codefresh.yml`
+{% highlight yaml %}
+version: '1.0'
+stages:
+  - [stage-name-1]
+  - [stage-name-2]
+
+steps:
+  step-name:
+    [step-contents]
+    stage: [name-of-stage]
+  another-step:
+    [step-contents]
+    stage: [name-of-stage]
+  the-very-last-step:
+    [step-contents]
+    stage: [name-of-stage]
+{% endhighlight %}
+
+As you can see, the modifications needed are:
+
+1. To list all the stage names at the root of the pipeline file
+1. To use the `stage` property on each step to assign it to a stage
+
+>This updated pipeline view affects only the visualization of the pipeline. It does not affect the order of step execution. Steps are still executed in the same order as listed in the `codefresh.yml` file.
+ If you wish to use parallel execution and advanced workflows, see the [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) page.
+ + +## Example pipeline with several stages + +Here is a more concrete example that you can use as a starting point: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - prepare + - test + - build + - scan + - integration + - deploy +steps: + step1: + stage: 'prepare' + image: node + commands: + - 'echo "Hello Step 1!"' + step2: + image: node + stage: 'prepare' + commands: + - 'echo "Hello Step 2!"' + step3: + image: node + stage: 'test' + commands: + - 'echo "Hello Step 3!"' + step4: + image: node + stage: 'build' + commands: + - 'echo "Hello Step 4!"' + step5: + image: node + stage: 'scan' + commands: + - 'echo "Hello Step 5!"' + step6: + image: node + stage: 'scan' + commands: + - 'echo "Hello Step 6!"' + step7: + image: node + stage: 'integration' + commands: + - 'echo "Hello Step 7!"' + step8: + image: node + stage: 'deploy' + commands: + - 'echo "Hello Step 8!"' + step9: + image: node + stage: 'deploy' + commands: + - 'echo "Hello Step 9!"' +{% endraw %} +{% endhighlight %} + +If you run the pipeline you will see this view + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/stages/complex.png" +url="/images/pipeline/codefresh-yaml/stages/complex.png" +alt="Complex Pipeline view" +caption="Complex Pipeline view" +max-width="80%" +%} + +Remember that the assignment of a step to a stage is happening only for graphical grouping purposes. It does +not affect the way your steps run. All steps will still run in the same order mentioned in the `codefresh.yml` file. + +Also notice if you enable this view a stage called *default* will show all build steps that are not explicitly assigned to a stage. + +## Using spaces in stage names + +If you wish to have spaces in stage names you need to quote them like this: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my test phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app + dockerfile: Dockerfile + MyUnitTests: + title: Unit testing + stage: 'my test phase' + image: ${{MyAppDockerImage}} + commands: + - npm run test +{% endraw %} +{% endhighlight %} + + +## Related articles +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Parallel workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/) diff --git a/_docs/pipelines/steps.md b/_docs/pipelines/steps.md new file mode 100644 index 00000000..6d32c11f --- /dev/null +++ b/_docs/pipelines/steps.md @@ -0,0 +1,1226 @@ +--- +title: "Steps in pipelines" +description: "Types of steps in Codefresh pipelines" +group: pipelines +redirect_from: + - /docs/steps/ +toc: true +--- + +Codefresh [pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) are composed of a series of steps. + +You can create your own pipelines by writing a [codefresh.yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) file that describes your pipeline. This file can then be version controlled on its own (pipeline as code). 
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+url="/images/pipeline/codefresh-yaml/stages/complex-pipeline.png"
+alt="Pipeline steps"
+caption="Pipeline steps"
+max-width="80%"
+%}
+
+
+## Built-in step types
+
+The steps offered by Codefresh are:
+
+* [Git clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/)
+  **Git clone** steps allow you to check out code in your pipeline from any internal or external repository. Existing accounts that still use repositories instead of [projects]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-concepts) have an implicit clone step in their pipelines.
+
+* [Freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/)
+  **Freestyle** steps are the cornerstone of Codefresh pipelines. They allow you to run any command within the context of a Docker container. A lot of Codefresh optimizations such as the [shared docker volume]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) are designed specifically for freestyle steps.
+Freestyle steps are a secure replacement for `docker run` commands.
+
+* [Build]({{site.baseurl}}/docs/pipelines/steps/build/)
+  **Build** steps are the main way to get access to the Docker daemon (Docker as a service) in Codefresh pipelines. Build steps take as input any Dockerfile and run it on the cloud in a similar manner to what you do on your workstation. Build steps automatically push the result to the default Docker registry of your account (no need for docker login commands). Codefresh also comes with a global Docker cache that automatically gets attached to all build nodes. Build steps are a secure replacement for `docker build` commands.
+
+* [Push]({{site.baseurl}}/docs/pipelines/steps/push/)
+  **Push** steps allow you to push and tag your Docker images (created by the build step) in any [external Docker registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). Push steps are *not* needed at all if you work with only the internal Codefresh registry. Push steps are a secure replacement for the `docker tag` and `docker push` commands.
+
+* [Composition]({{site.baseurl}}/docs/pipelines/steps/composition/)
+  **Composition** steps allow you to run multiple services together in the Codefresh infrastructure and execute unit tests or other commands against them. They are discarded once a pipeline finishes. Composition steps are a secure replacement for `docker-compose` definitions.
+
+* [Launch test environment]({{site.baseurl}}/docs/pipelines/steps/launch-composition/)
+  **Launch test environment** steps behave similarly to compositions, but they persist after the pipeline ends. This is a great way to create preview environments from your pull requests and send them to colleagues.
+
+* [Deploy]({{site.baseurl}}/docs/pipelines/steps/deploy/)
+  **Deploy steps** allow you to [perform Kubernetes deployments]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) in a declarative manner. They embody the Continuous Deployment aspect of Codefresh.
+
+* [Approval]({{site.baseurl}}/docs/pipelines/steps/approval/)
+  **Approval steps** allow you to pause pipelines and wait for human intervention before resuming. They allow you to embrace the concepts of Continuous Delivery.
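+
+As a quick orientation, here is a minimal sketch that combines several of these step types in one pipeline. The repository, image, and registry names are placeholder assumptions (for example, it assumes a registry integration named `dockerhub` is already configured):
+
+ `codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  main_clone:
+    type: "git-clone"
+    description: "Cloning main repository..."
+    repo: "my-github-user/my-app"
+    revision: "master"
+    git: github
+  run_unit_tests:
+    title: "Running unit tests"
+    image: node:lts
+    working_directory: '${{main_clone}}'
+    commands:
+      - npm ci
+      - npm test
+  build_image:
+    title: "Building Docker image"
+    type: "build"
+    image_name: "my-app"
+    dockerfile: "Dockerfile"
+    tag: latest
+  push_to_registry:
+    title: "Pushing to an external registry"
+    type: "push"
+    candidate: '${{build_image}}'
+    tag: latest
+    registry: dockerhub
+{% endraw %}
+{% endhighlight %}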
+
+
+>Codefresh also supports [parallel workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/), as well as running pipelines [locally on your workstation]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/).
+
+## Step directory
+
+In the case of freestyle steps, we also offer a [plugin marketplace](https://codefresh.io/steps/) with several existing plugins for popular integrations.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/plugin-directory.png"
+url="/images/pipeline/plugin-directory.png"
+alt="Codefresh steps directory"
+caption="Codefresh steps directory"
+max-width="80%"
+%}
+
+Codefresh steps can be:
+
+* Private (visible only to you and your team) or public (visible to everybody via the marketplace)
+* Official (supported by the Codefresh team) or community-based
+* Ready for production or still incubating
+
+You can use in your pipelines any of the public steps already in the marketplace, any steps created by your team, and any steps that you create for yourself.
+
+## Using custom pipeline steps
+
+When you create a pipeline, you will have access to two categories of steps:
+
+* Public steps that exist in the marketplace
+* Steps that you or your team have created (visible only to you)
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/choose-step.png"
+url="/images/pipeline/codefresh-yaml/steps/choose-step.png"
+alt="Choosing a custom step"
+caption="Choosing a custom step"
+max-width="60%"
+%}
+
+To use a step, first click on the pipeline section where you want to insert the step.
+You will get a new dialog with all the details of the step along with a live preview of the exact
+[yaml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) that will be inserted in your pipeline.
+
+For all steps you can define:
+
+* The title of the step (which will also be visible in the pipeline UI)
+* A free-text description
+* The [stage]({{site.baseurl}}/docs/pipelines/stages/) that will contain the step
+
+The rest of the fields are specific to each step. See the documentation of each step to understand what each field should contain. Fields that are essential for the step to work are marked as required with an asterisk.
+
+Once a step is added to the pipeline, you are free to change the resulting YAML even further by simply typing in the pipeline editor.
+
+## Creating your own step
+
+There are two ways to create custom steps in Codefresh. The simplest way is to package an existing CLI tool into a Docker image and use it as a freestyle step. The more advanced way is to create a typed step with explicit input and output parameters.
+
+Here is a summary of the two ways:
+
+{: .table .table-bordered .table-hover}
+| | Custom freestyle step | Codefresh typed plugin |
+| -------------- | ---------------------------- |-------------------------|
+| Assets needed | A Docker image | A Docker image and a plugin manifest|
+| Knowledge required | Docker building/pushing | Docker and Codefresh CLI |
+| Step can be used | In any Docker-based CI/CD platform | In Codefresh |
+| Effort required | Minimal | Medium |
+| Distribution via | Dockerhub | Codefresh marketplace |
+| Input variables | Yes | Yes|
+| Output variables | No | Yes |
+| Versioning via | Docker tags | Manifest entry |
+| Grouping of multiple steps | No | Yes |
+| Marketplace entry | Not possible| Possible/optional |
+| Best for sharing steps | With your team/company | With the world |
+
+We suggest that you start with custom freestyle steps first and only create typed plugins once you are familiar with Codefresh pipelines or want your plugin to appear in the marketplace.
+
+### Creating a custom freestyle step
+
+As an example, let's say that you need to use the [JFrog CLI](https://jfrog.com/getcli/) in a pipeline in order to interact with Artifactory or Bintray. JFrog does not offer any Docker image that contains the CLI, and you already know that all Codefresh steps [are actually Docker images]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/).
+
+Therefore, you can easily package the CLI into a Docker image and then make it available to any Codefresh pipeline that wishes to use it.
+First you create [a Dockerfile](https://github.com/kostis-codefresh/step-examples/blob/master/jfrog-cli-wrapper/Dockerfile) that packages the CLI:
+
+ `Dockerfile`
+{% highlight docker %}
+{% raw %}
+FROM debian:stable-slim
+
+WORKDIR /jfrog-cli
+
+ENV DEBIAN_FRONTEND noninteractive
+
+RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+
+RUN curl -fL https://getcli.jfrog.io | sh
+
+ENV JFROG_CLI_OFFER_CONFIG false
+ENV BINTRAY_LICENCES MIT
+
+RUN /jfrog-cli/jfrog bt config --licenses $BINTRAY_LICENCES
+
+RUN ln -s /jfrog-cli/jfrog /usr/local/bin/jfrog
+
+CMD ["/jfrog-cli/jfrog"]
+{% endraw %}
+{% endhighlight %}
+
+This is a standard Dockerfile. There is nothing specific to Codefresh in the image that gets created. You can test this Dockerfile locally with:
+
+{% highlight shell %}
+{% raw %}
+docker build . -t jfrog-cli
+docker run jfrog-cli
+{% endraw %}
+{% endhighlight %}
+
+In a similar manner you can package any other executable and its dependencies. You could even just package `curl` together with an external URL that hosts the service that you want to interact with in a Codefresh pipeline.
+
+Once the Dockerfile is ready, you need to build the image and push it to Dockerhub. You can do this manually from your workstation, but it is best to create a [Codefresh pipeline](https://github.com/kostis-codefresh/step-examples/blob/master/jfrog-cli-wrapper/codefresh.yml) that does it for you.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/create-custom-step.png"
+url="/images/pipeline/codefresh-yaml/steps/create-custom-step.png"
+alt="Creating a custom freestyle step"
+caption="Creating a custom freestyle step"
+max-width="80%"
+%}
+
+Now that the image is ready and public, you can notify your team that the new plugin is ready.
+Everybody who wants to interact with JFrog Bintray and/or Artifactory can place [the following snippet](https://github.com/kostis-codefresh/step-examples/blob/master/jfrog-cli-wrapper/codefresh-example.yml) in a pipeline: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + run_frog_cli: + title: Running jfrog CLI inside Docker + image: kkapelon/jfrog-cli + commands: + - jfrog bt --help + - jfrog rt --help +{% endraw %} +{% endhighlight %} + +You can then customize the exact command(s) that you want to run with the tool. All capabilities of freestyle steps are possible, such as passing environment variables as input parameters. + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + run_frog_cli: + title: Running jfrog CLI inside Docker + image: kkapelon/jfrog-cli + commands: + - jfrog bt package-show google/tensorflow/tensorflow + environment: + - BINTRAY_USER=my-user + - BINTRAY_KEY=my-secret-key +{% endraw %} +{% endhighlight %} + +If you want to use multiple versions of the step in the same pipeline, you can just create different docker tags. Notice that you can also use a [private registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) instead of Dockerhub if you wish your step to be used only within your organization. + + + +### Creating a typed Codefresh plugin + +You can use the [Codefresh CLI](https://codefresh-io.github.io/cli/) and more specifically the [step-type resource](https://codefresh-io.github.io/cli/steps/) to create your own typed step. Each Codefresh step is composed from two parts: + +1. The step description in the special yaml syntax for describing Codefresh steps +1. A Docker image that implements the step (optional) + +The easiest way to create your own step is to start by using the definition of an existing step. + +{% highlight bash %} +codefresh get step-type vault -o yaml > vault-step.yml +{% endhighlight %} + +Here is the resulting yaml: + + `vault-step.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: / + isPublic: false + description: >- + The plugin exports KV pairs from Hashicorp Vault to Codefresh pipeline ENV + variables + sources: + - 'https://github.com/codefresh-io/steps/tree/master/incubating/vault' + stage: incubating + maintainers: + - name: Alexander Aladov + categories: + - featured + official: false + tags: [] + icon: + type: svg + url: 'https://cdn.jsdelivr.net/gh/codefresh-io/steps/incubating/vault/icon.svg' + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + Vault_to_Env: + title: Importing vault values + type: vault + arguments: + VAULT_ADDR: '${{VAULT_ADDR}}' + VAULT_PATH: '${{VAULT_PATH}}' + VAULT_AUTH_TOKEN: '${{VAULT_AUTH_TOKEN}}' + VAULT_CLIENT_CERT_BASE64: '${{VAULT_CLIENT_CERT_BASE64}}' + VAULT_CLIENT_KEY_BASE64: '${{VAULT_CLIENT_KEY_BASE64}}' + created_at: '2019-07-03T14:57:02.057Z' + updated_at: '2019-09-18T08:15:28.476Z' + latest: true + version: 0.0.1 + id: 5d1cc23ea7e22e40227ea75d +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "VAULT_ADDR", + "VAULT_PATH", + "VAULT_AUTH_TOKEN" + ], + "properties": { + "VAULT_ADDR": { + "type": "string", + "description": "Vault server URI. Example: https://vault.testdomain.io:8200 (required)" + }, + "VAULT_PATH": { + "type": "string", + "description": "Path to secrets in vault. 
Example: secret/codefreshsecret (required)" + }, + "VAULT_AUTH_TOKEN": { + "type": "string", + "description": "Vault authentication token (required)" + }, + "VAULT_CLIENT_CERT_BASE64": { + "type": "string", + "description": "Base64 encoded client cerificate" + }, + "VAULT_CLIENT_KEY_BASE64": { + "type": "string", + "description": "Base64 encoded client key" + } + } + } + steps: + main: + name: vault + image: codefreshplugins/vault + environment: + - 'VAULT_ADDR=${{VAULT_ADDR}}' + - 'VAULT_PATH=${{VAULT_PATH}}' + - 'VAULT_AUTH_TOKEN=${{VAULT_AUTH_TOKEN}}' + - 'VAULT_CLIENT_CERT_BASE64=${{VAULT_CLIENT_CERT_BASE64}}' + - 'VAULT_CLIENT_KEY_BASE64=${{VAULT_CLIENT_KEY_BASE64}}' +{% endraw %} +{% endhighlight %} + +For each step you define the following sections: + +* Metadata to describe the characteristics of the step +* The description of its arguments +* The implementation (i.e. what yaml gets inserted in the pipeline) + +For the metadata section note the following: + +* `isPublic` decides if this step is visible only to your and your team, or visible to all (in the marketplace) +* The `name` of the step **must** be prefixed with your Codefresh account name. Steps created by the Codefresh team are on the root level of the hierarchy (without prefix). This is the same pattern that Dockerhub is using for images. +* `stage` shown if this step is ready for production or still incubating. This is just an indication to users. It doesn't affect the implementation of the step in any way +* `icon`. Ideally you provide a transparent svg so that the icon is scalable. The icon for a step is used both in the marketplace as well as the pipeline view. You can also select a default background to be used. Alternatively, you can define jpg/png icons for large/medium/small sizes. We suggest the svg approach +* The `version` property allows you to update your plugin and keep multiple variants of it in the marketplace +* The `examples` section will be shown in the marketplace as documentation for your step + +For the argument section we follow the [JSON Schema](http://json-schema.org/learn/miscellaneous-examples.html). You can use the [Schema generator](https://jsonschema.net/) to easily create a schema. JSON schema is used for arguments (i.e. input parameters) as well as output parameters as we will see later on. + +The property `additionalProperties` defines how strict the plugin will be with its arguments. If you set it to `false` (which is usually what you want) the pipeline will fail if the plugin is given more arguments that it is expecting. If you set it to `true`, then the plugin will only use the arguments it understands and will ignore the rest. + +The final part is the step implementation. Here you can define exactly the yaml that this step will insert in the pipeline. You can use any of the built-in steps in Codefresh and even add multiple steps. + +>Note that currently you cannot nest custom pipeline steps. We are aware of this limitation and are actively working on it, but at the time or writing you cannot use a typed step inside another typed step. + +Once you are done with your step, use the Codefresh CLI to upload it to the marketplace. If you want the step to be available only to you and your team make sure that the property `isPublic` is false (and then it will not be shown in the marketplace). 
+ +{% highlight bash %} +codefresh create step-type -f my-custom-step.yml +{% endhighlight %} + +If you make further changes to your step you can update it: + +{% highlight bash %} +codefresh replace step-type -f my-custom-step.yml +{% endhighlight %} + +If you want to remove your step from the marketplace, you can delete it completely: + +{% highlight bash %} +codefresh delete step-type kostis-codefresh/sample +{% endhighlight %} + +### Versioning of typed steps + +The `version` property under `metadata` in the plugin manifest allows you to publish multiple releases of the same plugin in the marketplace. Codefresh will keep all previous plugins and users are free to choose which version they want. + +To create a new version of your plugin: + +1. Update the `version` property under `metadata` in your custom YAML. +2. Run: + +{% highlight bash %} +codefresh create step-type -f custom-plugin.yaml +{% endhighlight %} + +You will now be able to see the new versions of your plugin in the step marketplace drop-down: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/step-versions.png" +url="/images/pipeline/codefresh-yaml/steps/step-versions.png" +alt="Different step versions" +caption="Different step versions" +max-width="60%" +%} + +You can also use the Codefresh CLI to list all version: + +{% highlight bash %} +codefresh get step-types kostis-codefresh/sample --versions +{% endhighlight %} + +To delete a specific version, use: + +{% highlight bash %} +codefresh delete step-type 'account/plugin:' +{% endhighlight %} + +Note that Codefresh step versions function like Docker tags in the sense that they are *mutable*. You can overwrite an existing plugin version with a new plugin manifest by using the `codefresh replace step-type` command. + +If users do not define a version once they use the plugin, the latest one (according to [semantic versioning](https://semver.org/)) will be used. Alternatively they can specify the exact version they need (even different versions within the same pipeline.) + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_step_1: + title: Running old custom step + type: kostis-codefresh/sample:1.2.1 + my_step_2: + title: Running new custom step + type: kostis-codefresh/sample:1.3.5 +{% endraw %} +{% endhighlight %} + +### Example with input parameters + +Let's create a very simple step called *node-version*. This step will read the application version from a NodeJS project and expose it as an environment variable. This way we can use the application version later in the pipeline (for example to tag a docker image). + +Here is the respective [step yaml](https://github.com/kostis-codefresh/step-examples/blob/master/node-version-plugin/read-app-version.yml). + + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/node-version + isPublic: false + description: >- + The plugin exports as an environment variable the application version from package.json + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - utility + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/nodejs-icon.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'my-github-user/my-github-repo' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/node-version + arguments: + PACKAGE_JSON_FOLDER: './my-github-repo' + print_app_version: + title: Printing app version + image: alpine + commands: + - echo $APP_VERSION + latest: true + version: 1.0.0 +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "PACKAGE_JSON_FOLDER" + ], + "properties": { + "PACKAGE_JSON_FOLDER": { + "type": "string", + "description": "folder where package.json is located" + } + } + } + steps: + main: + name: kostis-codefresh/node-version + image: node + commands: + - cd $WORK_DIR + - pwd + - APP_VERSION=$(node -p -e "require('./package.json').version") + - echo $APP_VERSION + - export APP_VERSION + - cf_export APP_VERSION + environment: + - 'WORK_DIR=${{PACKAGE_JSON_FOLDER}}' +{% endraw %} +{% endhighlight %} + +If you look at the `spec` section you will see that the plugin expects a single parameter called `PACKAGE_JSON_FOLDER`. This will +be passed by the plugin user to specify the folder that contains the `package.json` file. This way this plugin can be used for multiple applications. For example, the plugin user might check out 3 different Node.js projects and use the plugin to read the versions of all of them. + +The plugin implementation is specified in the `steps` sections. We use the standard [Node Docker image](https://hub.docker.com/_/node) to read the version from the `package.json` file. Notice how we convert the plugin argument to an environment variable called `WORK_DIR` + +By default all plugins start with the Codefresh volume at `/codefresh/volume` as a working folder. So with the `cd` command we enter the project folder (which we assume was checked out in a previous pipeline step). Once the version is read it is made available to all the other pipeline steps with the [cf_export command]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command). + +We now insert our plugin in the marketplace with the following command: + +{% highlight bash %} +codefresh create step-type -f read-app-version.yml +{% endhighlight %} + +The step is now ready to be used by anybody. + +An example user pipeline is shown at [codefresh.yml](https://github.com/kostis-codefresh/step-examples/blob/master/node-version-plugin/codefresh.yml) + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefreshdemo/example_nodejs_postgres' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/node-version + arguments: + PACKAGE_JSON_FOLDER: './example_nodejs_postgres' + print_app_version: + title: Printing app version + image: alpine + commands: + - echo $APP_VERSION +{% endraw %} +{% endhighlight %} + +This is a very simple pipeline that checks out a NodeJS project and uses our plugin. Notice how we pass as argument the required parameter `example_nodejs_postgres` to tell the plugin where our `package.json` file is located. Once the plugin runs the application version is available as an environment variable that we can use in other steps as `APP_VERSION`. 
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/input-parameters.png"
+url="/images/pipeline/codefresh-yaml/steps/input-parameters.png"
+alt="Step input parameters"
+caption="Step input parameters"
+max-width="60%"
+%}
+
+The input parameter is also shown as required in the marketplace.
+
+{% include
+image.html
+lightbox="true"
+file="/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png"
+url="/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png"
+alt="Input parameters on marketplace"
+caption="Input parameters on marketplace"
+max-width="40%"
+%}
+
+This is a trivial example, but it still shows how a Codefresh pipeline can be declarative while actually doing a lot of imperative actions behind the scenes.
+
+### Example with output parameters
+
+In the previous example our plugin had an output parameter (`APP_VERSION`) that was created by the custom step and given back to the user. Even though creating an output parameter using only `cf_export` works just fine at the technical level, it is best to formally define output parameters in the step definition.
+
+If you define output parameters in the step definition, their names will appear in the marketplace and users will have an easier time understanding what your step produces. You will be able to define complete JSON objects in addition to output strings. Formal output parameters are also available under a special notation (`steps.<step_name>.output`) that we will explain in this example.
+
+We suggest you always formalize your output parameters in your step definition, especially when your step has a large number of output parameters.
+
+The same [JSON Schema](http://json-schema.org/learn/miscellaneous-examples.html) is also used for output parameters as for input ones.
+Here is a [very simple example](https://github.com/kostis-codefresh/step-examples/blob/master/output-parameters/output-parameters-sample.yml) that shows the different types of output parameters you can have:
+ + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/output-parameters-example + isPublic: false + description: >- + The plugin shows how you can export output parameters + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - utility + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/bash-1.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + dummy_parameters: + title: Creating output parameters + type: kostis-codefresh/output-parameters-example + print_my_variables: + title: Printing dummy content + image: alpine + commands: + - echo $MY_NUMBER + - echo $MY_CITY + - echo $MY_FAVORITE_FOOD + - echo ${{steps.dummy_parameters.output.MY_NUMBER}} + - echo ${{steps.dummy_parameters.output.MY_CITY}} + - echo ${{steps.dummy_parameters.output.MY_FAVORITE_FOOD}} + latest: true + version: 1.0.0 +spec: + returns: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": true, + "patterns": [], + "required": [ + "MY_NUMBER", + "MY_CITY", + "MY_FAVORITE_FOOD" + ] + , + "properties": { + "MY_NUMBER": { + "type": "number", + "description": "an example variable that holds a number" + }, + "MY_CITY": { + "type": "object", + "description": "an example variable that holds a JSON object", + "required": ["city_name", "country", "population"], + "properties": { + "city_name": {"type": "string"}, + "country": {"type": "string"}, + "population": {"type": "integer"} + } + }, + "MY_FAVORITE_FOOD": { + "description": "an example variable that holds a number", + "type": "array", + "maxItems": 3, + "items": { + "type": "string" + } + } + } + } + steps: + main: + name: kostis-codefresh/output-parameters-example + image: alpine + commands: + - cf_export MY_NUMBER=42 + - cf_export MY_CITY='{"city_name":"San Francisco", "country":"usa","population":884363}' + - cf_export MY_FAVORITE_FOOD='["pizza", "ramen", "hot dogs"]' + +{% endraw %} +{% endhighlight %} + +This plugin exports 3 output parameters + +* `MY_NUMBER` - a single number +* `MY_CITY` - an object with fields `city_name`, `country`, `population` +* `MY_FAVORITE_FOOD` - an array. + +Output parameters are defined in the `returns` block. +The output parameters of the step are now shown in the marketplace so consumers of this plugin know what to expect when they use it. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png" +url="/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png" +alt="Output parameters on marketplace" +caption="Output parameters on marketplace" +max-width="40%" +%} + +As can be seen from the `examples` block, when you have formal output parameters you can also access them by mentioning the specific steps in your pipeline that creates it. The following are two equal ways to use an output parameter in your pipeline: + +``` +{% raw %} +echo $MY_NUMBER +echo ${{steps.dummy_parameters.output.MY_NUMBER}} +{% endraw %} +``` + +In the case of output parameters that are objects you can also use `jq` to get specific properties like this: + +``` +{% raw %} +echo ${{steps.dummy_parameters.output.MY_CITY}} | jq '.city_name' +{% endraw %} +``` + +This will print "San Francisco". 
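+
+Array outputs can be consumed in the same way. For example, using the `dummy_parameters` step from the example above, the following would print the first entry of the list ("pizza"):
+
+```
+{% raw %}
+echo ${{steps.dummy_parameters.output.MY_FAVORITE_FOOD}} | jq '.[0]'
+{% endraw %}
+```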
+ + +### Example with input/output parameters + +Let's take everything we learned from the previous examples and create a custom step that has + +1. A custom Docker image +1. Formal input parameters +1. Format output parameters + +In this simple example we will create a custom step that reads the Maven coordinates from a `pom.xml` file. Unlike `package.json`, a Maven file has 3 characteristics (group, artifact name and version). First we create a [very simple executable](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/mvncoords.go) that reads a Maven file and gives us these coordinates in JSON format. + +{% highlight shell %} +{% raw %} +mvncoords -f pom.xml +{"groupId":"com.example.codefresh","artifactId":"my-java-app","version":"3.0.2"} +{% endraw %} +{% endhighlight %} + +Next, we package this executable in a [Dockerfile](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/Dockerfile). + + `Dockerfile` +{% highlight docker %} +{% raw %} +FROM golang:1.12-alpine AS build_base + +WORKDIR /tmp/ + +COPY . . + +# Unit tests +RUN go test -v + +# Build the Go app +RUN go build -o ./out/mvncoords . + +# Start fresh from a smaller image +FROM alpine:3.9 + +COPY --from=build_base /tmp/out/mvncoords /usr/local/bin/mvncoords + +CMD ["mvncoords"] +{% endraw %} +{% endhighlight %} + +We now have a custom Docker image that contains our executable. If we want other people to use it, we need to push it to Dockerhub. You can do this manually from your workstation using `docker login` and `docker push` commands, but it is much better to automate this with a Codefresh pipeline. + + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/create-plugin-image.png" +url="/images/pipeline/codefresh-yaml/steps/create-plugin-image.png" +alt="Building a public Docker image" +caption="Building a public Docker image" +max-width="60%" +%} + +This [pipeline](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/codefresh.yml) checks out the Dockerfile plus source code, builds the docker image and then pushes it to Dockerhub (so that the image is public). + +Finally we are ready to create our Codefresh plugin. Here is the [specification](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/read-maven-version.yml): + + + + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/mvn-version + isPublic: false + description: >- + The plugin exports as an environment variable the mvn coordinates from pom.xml + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - utility + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/java-4.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + main_clone: + title: Cloning main repository... 
+ type: git-clone + repo: 'my-github-user/my-github-repo' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/mvn-version + arguments: + POM_XML_FOLDER: './my-github-repo' + print_app_version: + title: Printing app coordinates + image: alpine + commands: + - echo $MVN_COORDS + - echo ${{steps.read_app_version.output.MVN_COORDS}} + latest: true + version: 1.0.0 +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "POM_XML_FOLDER" + ], + "properties": { + "POM_XML_FOLDER": { + "type": "string", + "description": "folder where pom.xml is located" + } + } + } + returns: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": true, + "patterns": [], + "required": [ + "MVN_COORDS" + ], + "properties": { + "MVN_COORDS": { + "type": "object", + "required": ["groupId", "artifactId", "version"], + "properties": { + "groupId": {"type": "string"}, + "artifactId": {"type": "string"}, + "version": {"type": "string"} + } + } + } + } + steps: + main: + name: kostis-codefresh/mvn-version + image: kkapelon/maven-version-extract + commands: + - cd $WORK_DIR + - MVN_COORDS=$(mvncoords -json) + - export MVN_COORDS + - cf_export MVN_COORDS + environment: + - 'WORK_DIR=${{POM_XML_FOLDER}}' +{% endraw %} +{% endhighlight %} + +We place this plugin into the marketplace with + +``` +codefresh create step-type -f read-maven-version.yml +``` + +If you look at the plugin entry in the marketplace you will see both input (the folder of the pom.xml) and output parameters (mvn coordinates) defined: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/plugin-parameters.png" +url="/images/pipeline/codefresh-yaml/steps/plugin-parameters.png" +alt="Input and output parameters" +caption="Input and output parameters" +max-width="60%" +%} + +The plugin is now ready to be used in a pipeline: + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/plugin-usage.png" +url="/images/pipeline/codefresh-yaml/steps/plugin-usage.png" +alt="Plugin usage" +caption="Plugin usage" +max-width="60%" +%} + +If you look at the [pipeline definition](https://github.com/kostis-codefresh/step-examples/blob/master/maven-version-plugin/codefresh-example.yml) you will see how we pass arguments in the plugin and get its output with the `steps.output` syntax. + + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Cloning main repository... + type: git-clone + repo: 'codefresh-contrib/spring-boot-2-sample-app' + revision: 'master' + git: github + read_app_version: + title: Reading app version + type: kostis-codefresh/mvn-version + arguments: + POM_XML_FOLDER: './spring-boot-2-sample-app' + print_app_version: + title: Printing app version + image: alpine + commands: + - echo $MVN_COORDS + - echo ${{steps.read_app_version.output.MVN_COORDS}} +{% endraw %} +{% endhighlight %} + +This was a trivial example, but it clearly demonstrates how a custom step communicates with the rest of the pipeline by getting input from the previous steps and preparing output for the steps that follow it. 
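+
+Since `MVN_COORDS` is a JSON object, a later step can also extract a single coordinate with `jq`, following the same pattern as in the output parameters example above. The following is a hypothetical extra step that could be appended to the `steps:` block of the pipeline above (it is not part of the original example):
+
+{% highlight yaml %}
+{% raw %}
+  print_version_only:
+    title: Printing only the Maven version
+    image: alpine
+    commands:
+      - apk add --no-cache jq
+      - echo ${{steps.read_app_version.output.MVN_COORDS}} | jq -r '.version'
+{% endraw %}
+{% endhighlight %}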
+ +### Exporting parameters manually inside a plugin + +Normally, in a pipeline you can either use the [cf_export]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) command or write directly to the [/codefresh/volume/env_vars_to_export]({{site.baseurl}}/docs/pipelines/variables/#directly-writing-to-the-file) file. + +However, inside a plugin you can also use the `/meta/env_vars_to_export` file that has the same semantics, but is used for exporting variables in the same scope as the plugin only. + +The rules for using `/meta/env_vars_to_export` are: +- When the step-type (plugin) does not define the `return` schema, all the output variables from substeps will be projected and exported as the root step (they may override each other). +- When `return` schema is defined, only the variables that matched the definition will be exported as root step. + +`plugin.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: /my-step + ... +spec: + arguments: |- + { + ... + } + returns: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": true, + "patterns": [], + "required": [ + "ROOT_VAR" + ] + , + "properties": { + "ROOT_VAR": { + "type": "string", + "description": "an example variable" + } + } + } + steps: + export_my_variable: + title: "Exporting custom variable" + image: alpine + commands: + - echo PLUGIN_VAR=Alice >> /meta/env_vars_to_export + - echo ROOT_VAR=Bob >> /meta/env_vars_to_export + read_my_variable: + title: "Reading custom variable" + image: alpine + commands: + - source /meta/env_vars_to_export + - echo $PLUGIN_VAR #Alice + - echo $ROOT_VAR #Bob +{% endraw %} +{% endhighlight %} + + +`codefresh.yaml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + plugin: + type: /my-step + echo: + image: alpine + commands: + - echo $PLUGIN_VAR #empty + - echo $ROOT_VAR #Bob +{% endraw %} +{% endhighlight %} + +You can still use `cf_export` command inside the plugin as well (as shown in the previous examples). + + +### Example with step templating + +As an advanced technique, Codefresh allows you to define a custom step using templating instead of fixed YAML. We support templates inside the `spec:` block of a plugin definition by taking advantage of the [Gomplate](https://github.com/hairyhenderson/gomplate) library that offers additional templating functions on top of vanilla [Go templates](https://golang.org/pkg/text/template/). + +> Note: Gomplate Data functions will not work since Codefresh does not pass the Data object to gomplate functions. + +As a simple example lets say we want to create a single step that checks out any number of git repositories. Of course you could just copy-paste the git clone step multiple times in a single pipeline. To make things easier we will create a single step that takes an array of git repositories and checks them out on its own: + +{% highlight yaml %} +{% raw %} +checkout_many_projects: + title: Checking out my Git projects + type: kostis-codefresh/multi-git-clone + arguments: + GIT_PROJECTS: + - 'codefresh-contrib/ruby-on-rails-sample-app' + - 'kubernetes/sample-apiserver' + - 'kostis-codefresh/nestjs-example' + - 'spring-projects/spring-petclinic' +{% endraw %} +{% endhighlight %} + +The GitHub projects are passed as an array, so if we want to check out an additional project, we simply add items to that array. 
+ +Here is the [step specification](https://github.com/kostis-codefresh/step-examples/blob/master/multi-clone/multi-clone-step.yml): + + `plugin.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +kind: step-type +metadata: + name: kostis-codefresh/multi-git-clone + isPublic: false + description: >- + This pipeline plugin shows templating of custom steps + sources: + - 'https://github.com/kostis-codefresh/step-examples' + stage: incubating + maintainers: + - name: Kostis Kapelonis + categories: + - git + official: false + tags: [] + icon: + type: svg + url: https://cdn.worldvectorlogo.com/logos/git.svg + background: '#f4f4f4' + examples: + - description: example-1 + workflow: + version: '1.0' + steps: + checkout_many_projects: + title: Checking out my Git projects + type: kostis-codefresh/multi-git-clone + arguments: + GIT_REVISION: 'master' + GIT_PROVIDER: 'github' + GIT_PROJECTS: + - 'codefresh-contrib/ruby-on-rails-sample-app' + - 'kubernetes/sample-apiserver' + - 'kostis-codefresh/nestjs-example' + - 'spring-projects/spring-petclinic' + latest: true + version: 1.0.0 +spec: + arguments: |- + { + "definitions": {}, + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "additionalProperties": false, + "patterns": [], + "required": [ + "GIT_PROJECTS", + "GIT_REVISION", + "GIT_PROVIDER" + ], + "properties": { + "GIT_REVISION": { + "type": "string", + "description": "branch or tag or revision to checkout (same for all projects)" + }, + "GIT_PROVIDER": { + "type": "string", + "description": "Name of git provider to use from Codefresh integrations screen" + }, + "GIT_PROJECTS": { + "description": "A list/array of git projects to checkout", + "type": "array", + "maxItems": 10, + "items": { + "type": "string" + } + } + } + } + delimiters: + left: '[[' + right: ']]' + stepsTemplate: |- + print_info_message: + name: kostis-codefresh/multi-git-clone + title: Info message + image: alpine + commands: + - echo "Checking out [[ len .Arguments.GIT_PROJECTS ]] git projects" + [[ range $index, $git_project :=.Arguments.GIT_PROJECTS ]] + clone_project_[[$index]]: + title: Cloning [[$git_project]] ... + type: git-clone + repo: '[[$git_project]]' + revision: [[$.Arguments.GIT_REVISION]] + git: [[$.Arguments.GIT_PROVIDER]] + [[end]] +{% endraw %} +{% endhighlight %} + +There are two important points here: + +1. Instead of using a `steps:` block, we instead define a block called `stepsTemplate:`. This block name instructs Codefresh that we will use templates +1. Because the Codefresh runtime is already using the double curly braces for variables mentioned as {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %}, we instead define templates with the characters {% raw %}`[[]]`{% endraw %}. You can see the definitions for these characters inside the `delimiters:` block. You are free to use any other replacement characters of your choosing. + +In the `stepsTemplate` block we use Golang template keywoards such as `range`, `len` and template variables (such as `git_project`). You can use all the capabilities of Go templates (e.g. `if`, `range`, `with`) as well as the extra methods of [gomplate](https://docs.gomplate.ca/) such as math and net functions. 
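+
+For reference, with the four-project array shown in the example arguments, the `stepsTemplate` above would expand to something roughly like the following (an illustration of the rendering, not actual engine output):
+
+{% highlight yaml %}
+print_info_message:
+  name: kostis-codefresh/multi-git-clone
+  title: Info message
+  image: alpine
+  commands:
+    - echo "Checking out 4 git projects"
+clone_project_0:
+  title: Cloning codefresh-contrib/ruby-on-rails-sample-app ...
+  type: git-clone
+  repo: 'codefresh-contrib/ruby-on-rails-sample-app'
+  revision: master
+  git: github
+clone_project_1:
+  title: Cloning kubernetes/sample-apiserver ...
+  type: git-clone
+  repo: 'kubernetes/sample-apiserver'
+  revision: master
+  git: github
+# clone_project_2 and clone_project_3 follow the same pattern
+{% endhighlight %}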
+ +Creating the [marketplace entry](https://codefresh.io/steps/step/kostis-codefresh%2Fmulti-git-clone) for a step with templates is exactly the same as any other step: + +``` +codefresh create step-type -f multi-clone-step.yml +``` + +You can then use the step in [any pipeline](https://github.com/kostis-codefresh/step-examples/blob/master/multi-clone/codefresh.yml) and pass the arguments that will fill the template: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + checkout_many_projects: + title: Checking out my Git projects + type: kostis-codefresh/multi-git-clone + arguments: + GIT_REVISION: 'master' + GIT_PROVIDER: 'github' + GIT_PROJECTS: + - 'codefresh-contrib/ruby-on-rails-sample-app' + - 'kubernetes/sample-apiserver' + - 'kostis-codefresh/nestjs-example' + - 'spring-projects/spring-petclinic' + print_my_workspace: + title: Show projects + image: alpine + commands: + - ls -l + - pwd +{% endraw %} +{% endhighlight %} + +We have also added two extra parameters, one for the git revision and one for the [git provider]({{site.baseurl}}/docs/integrations/git-providers/) that will be used during checkout. + +The end result is that with a single step you can checkout many projects. Checking out an additional project is as simple as adding a new entry in the `GIT_PROJECTS` array. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/multi-checkout.png" +url="/images/pipeline/codefresh-yaml/steps/multi-checkout.png" +alt="Checking out multiple Git repositories in a single step" +caption="Checking out multiple Git repositories in a single step" +max-width="60%" +%} + +This was a contrived example to demonstrate how you can use templates in the Codefresh plugin specification. Note that using templates in Codefresh steps is an advanced technique and should be used sparingly. + +### Limitations of custom plugins + +[Parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) are not supported inside custom steps. + +Within a custom step, the [fail_fast field]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#execution-flow) does not work. Use the `failFast` field instead. + +Custom steps are not compatible with [service containers]({{site.baseurl}}/docs/pipelines/service-containers/). +More specifically: + + * If you have a [service container in the pipeline-level]({{site.baseurl}}/docs/pipelines/service-containers/#running-services-for-the-duration-of-the-pipeline), steps inside the custom plugin will not be able to access it + * If you try to attach a service container to a custom plugin, the plugin will fail when executed + * If you try to define a custom plugin where a step inside it has a service container attached, the custom plugin will fail when executed + +## Related articles +[Introduction to Pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) +[Build step]({{site.baseurl}}/docs/pipelines/steps/build/) +[Push step]({{site.baseurl}}/docs/pipelines/steps/push/) + diff --git a/_docs/pipelines/steps/approval.md b/_docs/pipelines/steps/approval.md new file mode 100644 index 00000000..9d35ca80 --- /dev/null +++ b/_docs/pipelines/steps/approval.md @@ -0,0 +1,348 @@ +--- +title: "Approval" +description: "How to Pause Pipelines for Manual Approval" +group: codefresh-yaml +sub_group: steps +toc: true +--- + +The approval step allows you to pause a pipeline and wait for human intervention before going on. 
+ +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/approval/approval-waiting.png" +url="/images/pipeline/codefresh-yaml/approval/approval-waiting.png" +alt="Manual Approval step" +caption="Manual Approval step" +max-width="80%" +%} + +Some example scenarios for using the approval step: + +* Pause before deploying to production +* Pause before destroying an environment +* Pause for some manual smoke tests or metric collection + +## Usage + + `YAML` +{% highlight yaml %} +{% raw %} +step_name: + type: pending-approval + title: Step Title + description: Step description + timeout: + duration: 2 + finalState: approved + timeUnit: minutes + when: + branch: + only: [ master ] + +{% endraw %} +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `timeout` | Defines an automatic approval/rejection if a specified amount of time has passed. The `duration` field is hours. By default it is set to 168 (i.e, 7 days). The `finalState` field defines what will happen after the duration time has elapsed. Possible values are `approved`/`denied`/`terminated` | Optional | +| `timeUnit` | This field defines possible options of `minutes`, or `hours`. If the field is not set, the default is `hours` | Optional +| `fail_fast` | If set to false, the pipeline will continue even when the step is rejected | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `when` | Define a set of conditions that need to be satisfied in order to execute this step. You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) article. | Optional | + + +## Pausing the Pipeline + +Once the pipeline reaches an approval step it will stop. At this point it **does not** consume any resources. +In the Codefresh UI you will see the *Approve/Reject* buttons. + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/build-waiting.png" +url="/images/codefresh-yaml/approval/build-waiting.png" +alt="Build waiting for input" +caption="Build waiting for input" +max-width="80%" +%} + +Once you click any of them the pipeline will continue. Further steps in the pipeline can be enabled/disabled +according to the approval result. + +## Automatic Approvals/Rejections + +By default, a pipeline that contains an approval step will pause for 7 days (168 hours) onces it reaches that step. If you want some automatic action to happen after a specified time period you can define it in advance with the `timeout` property: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + waitForInputBeforeProduction: + type: pending-approval + title: Deploy to Production? + timeout: + duration: 2 + finalState: denied +{% endraw %} +{% endhighlight %} + +This pipeline will wait for approval for two hours. If somebody approves it, it will continue. If nothing happens after two hours +the approval step will be automatically rejected. + +## Approval Restrictions + +By default, any Codefresh user can approve any pipeline that is paused at the approval state. 
If you want to restrict +the approval action to a subset of people, you can use the [Access Control facilities]({{site.baseurl}}/docs/enterprise/access-control/) that Codefresh provides. + +This is a two-step process. First you need to tag your pipeline with one or more tags (tag names are arbitrary). You can edit tags in the pipeline settings screen. + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/pipeline-tag.png" +url="/images/codefresh-yaml/approval/pipeline-tag.png" +alt="Marking a pipeline with tags" +caption="Marking a pipeline with tags" +max-width="40%" +%} + +Once you have tagged your pipelines you can create one or more access rules that restrict approval to specific teams within your organization. + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/approval-rule.png" +url="/images/codefresh-yaml/approval/approval-rule.png" +alt="Rules for approvals" +caption="Rules for approvals" +max-width="80%" +%} + + +For more details on access control and users see also the [access control page]({{site.baseurl}}/docs/administration/access-control/). + +## Keeping the Shared Volume after an Approval + +As soon as a pipeline starts waiting for an approval, all contents of the [shared Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) are lost. Once the pipeline continues running all files that were created manually inside the volume are not available any more. + +If you want to keep any temporary files that were there before the approval, you need to enable the respective policy in your [pipeline settings]({{site.baseurl}}/docs/pipelines/pipelines/#policies). + +You can either set this option differently per pipeline, or globally in your account at your [account settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings). + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/keep-volume.png" +url="/images/codefresh-yaml/approval/keep-volume.png" +alt="Preserve Codefresh volume after an approval" +caption="Preserve Codefresh volume after an approval" +max-width="90%" +%} + +>Notice that if you do decide to keep the volume after an approval, the pipeline will still count as "running" against your pricing plan (if you use the SAAS version of Codefresh). If you don't keep the volume, the pipeline is stopped/paused while it is waiting for approval and doesn't count against your pricing plan. We advise you to keep the volume only for pipelines that really need this capability. + +>Notice also that you if you use the [Codefresh Runner]({{site.baseurl}}/docs/reference/behind-the-firewall/) and your [Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) is setup with local volumes, then the volume will only be present if the dind pod +is scheduled in the same node once the pipeline resumes. Otherwise the volume will not be reused. + +## Controlling the Rejection Behavior + +By default if you reject a pipeline, it will stop right away and it will be marked as failed. All subsequent steps after the approval one will not run at all. + +You might want to continue running the pipeline even when it is rejected by adding the `fail_fast` property in the approval step: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + waitForInputBeforeProduction: + fail_fast: false + type: pending-approval + title: Deploy to Production? 
+{% endraw %} +{% endhighlight %} + +In this case you can also read the approval result and make the pipeline work differently according to each choice (demonstrated in the following section). + + +## Getting the Approval Result + +As also explained in [step dependencies]({{site.baseurl}}/docs/pipelines/advanced-workflows/#custom-steps-dependencies) all steps in the Codefresh pipeline belong to a global object +called `steps` (indexed by name). You can read the `result` property for an approval step to see if it was approved or rejected. + +Here is an example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + askForPermission: + type: pending-approval + title: Destroy QA environment? + destroyQaEnvNow: + image: alpine:3.8 + title: Destroying env + commands: + - echo "Destroy command running" + when: + steps: + - name: askForPermission + on: + - approved +{% endraw %} +{% endhighlight %} + +In this example the second step that is destroying an environment will only run if the user +approves the first step. In case of rejection the second step will be skipped. + +You can follow the same pattern for running steps when an approval step was rejected. +Here is a full example with both cases. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- prepare +- yesPleaseDo +- noDont + +steps: + step_1: + image: alpine:3.8 + title: building chart + stage: prepare + commands: + - echo "prepare" + deployToProdNow: + fail_fast: false + type: pending-approval + title: Should we deploy to prod + stage: prepare + step_2: + image: alpine:3.8 + title: prepare environment + stage: yesPleaseDo + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - approved + step_3: + image: alpine:3.8 + title: deploy to production + stage: yesPleaseDo + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - approved + step_4: + image: alpine:3.8 + title: prepare environment + stage: noDont + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - denied + step_5: + image: alpine:3.8 + title: deploy to staging + stage: noDont + commands: + - echo "world" + when: + steps: + - name: deployToProdNow + on: + - denied +{% endraw %} +{% endhighlight %} + +Here is the pipeline state after a rejection: + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/pipeline-rejected.png" +url="/images/codefresh-yaml/approval/pipeline-rejected.png" +alt="Rejecting a pipeline" +caption="Rejecting a pipeline" +max-width="80%" +%} + +>Note that we have added the `fail_fast` property in the approval step because we want the pipeline to continue even when the step is rejected. + + +You can see that only two steps were ignored. If you rerun the pipeline and approve +it, the other two steps will be ignored. + +## Define Concurrency Limits + +Codefresh has the ability to limit the amount of running builds for a specific pipeline with several concurrency policies in the pipeline settings. You can choose if a build that is in a pending approval state will count against the concurrency limits or not. + +As an example let's say that the concurrency limit for a specific pipeline is set to 2. Currently there is one active/running build and a second build that is pending approval. + +1. If the pipeline settings define that builds in pending approval **count** against concurrency, then if you launch a third build it will wait until one of the first two has finished +1. 
If the pipeline settings define that builds in pending approval **do not** count against concurrency, then if you launch a third build it will execute right away. + +There isn't a correct or wrong way to set this option. It depends on your organization and if your consider builds pending approval as "active" or not. + +You can either set this option [differently per pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#policies), or globally in your account at your [account settings](https://g.codefresh.io/account-admin/account-conf/pipeline-settings). + + +## Slack Integration + +If you also enable [Slack integration]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) in Codefresh you will have the choice of approving/rejecting a pipeline +via a Slack channel + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/slack-approval.png" +url="/images/codefresh-yaml/approval/slack-approval.png" +alt="Approval step in a slack channel" +caption="Approval step in a slack channel" +max-width="80%" +%} + +To enable this behavior, you need to activate it in the Slack settings page: + +{% include +image.html +lightbox="true" +file="/images/codefresh-yaml/approval/slack-settings.png" +url="/images/codefresh-yaml/approval/slack-settings.png" +alt="Slack settings" +caption="Slack settings" +max-width="50%" +%} + +Also, if you run a pipeline manually that includes an approval step you should check +the "Report notification of pipeline execution" checkbox as explained in [Monitoring Pipelines]({{site.baseurl}}/docs/pipelines/monitoring-pipelines/#monitoring-pipelines-outside-the-codefresh-ui). + + + +## Related articles +[Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/) +[Advanced Workflows ]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +[Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) + + diff --git a/_docs/pipelines/steps/build.md b/_docs/pipelines/steps/build.md new file mode 100644 index 00000000..a270af73 --- /dev/null +++ b/_docs/pipelines/steps/build.md @@ -0,0 +1,379 @@ +--- +title: "Build" +description: "Building Docker images in Codefresh pipelines" +group: pipelines +sub_group: steps +redirect_from: + - /docs/build-1/ + - /docs/codefresh-yaml/steps/build-1/ +toc: true +--- +Use Docker to build an image and store it in Codefresh. + +## Purpose of build steps + +In Codefresh, docker containers are first-class citizens +and special typed steps are offered for the most usual docker commands. Build steps are a secure replacement for `docker build` commands. + +Therefore, this command on your local workstation: + +``` +docker build . -t my-app-image:1.0.1 +``` + +will become in Codefresh the following build step. 
+ +```yaml +BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + tag: 1.0.1 +``` + +## Usage + + `YAML` +{% highlight yaml %} +step_name: + type: build + title: Step Title + description: Free text description + working_directory: {% raw %}${{clone_step_name}}{% endraw %} + dockerfile: path/to/Dockerfile + image_name: owner/new-image-name + tag: develop + build_arguments: + - key=value + target: stage1 + no_cache: false + no_cf_cache: false + tag_policy: original + fail_fast: false + metadata: + set: + - qa: pending + when: + condition: + all: + noDetectedSkipCI: "includes('{% raw %}${{CF_COMMIT_MESSAGE}}{% endraw %}', '[skip ci]') == false" + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `working_directory` | The directory in which the build command is executed. It can be an explicit path in the container's file system, or a variable that references another step.
                                                        The default is {% raw %} `${{main_clone}}` {% endraw %}. This only changes the Docker build context and is unrelated to the `WORKDIR` inside the Dockerfile. | Default | +| `dockerfile` | The path to the `Dockerfile` from which the image is built. The default is `Dockerfile`. | Default | +| `image_name` | The name for the image you build. | Required | +| `region` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The names of the regions for which to perform cross-region replication. The source and destination regions must be defined in separate steps. | Optional | +| `tag` | The tag that is assigned to the image you build.
                                                        The default is the name of the branch or revision that is built. | Default | +| `tags` | Multiple tags under which to push the image. Use either this or `tag`. This is an array, so it should be of the following style:
                                                        {::nomarkdown}
                                                        tags:
                                                        - tag1
                                                        - tag2
                                                        - {% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}
                                                        - tag4
                                                        {:/}or
                                                        {::nomarkdown}
                                                        tags: [ 'tag1', 'tag2', '{% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}', 'tag4' ]
{:/} | Optional | +| `registry` | The logical name of one of the registries configured in the integration view.
                                                        The default value will be your default registry [if you have more than one]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). | Optional | +| `registry_contexts` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +| `disable_push` | Do not push to any registry automatically. | Optional | +| `tag_policy` | Push the tag name without change or lowercase it automatically. By default `tag: MixedCase` will be pushed as `image_name:mixedcase`. Possible options are `original` and `lowercase`. Default is `lowercase`. | Default | +| `no_cache` | Disable Docker engine cache for the build [more info](https://codefresh.io/docs/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) | Optional | +| `no_cf_cache` | Disable Codefresh build optimization for the build [more info](https://codefresh.io/docs/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) | Optional | +| `build_arguments` | A set of [Docker build arguments](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables-build-arg) to pass to the build process. | Optional | +| `target` | Target stage in a multi-stage build (the build will run until this stage). | Optional | +| `fail_fast` | If set to `true` (the default), the pipeline is halted when this step fails. | Default | +| `when` | Define a set of conditions that need to be satisfied in order to execute this step.
                                                        You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `metadata` | Annotate the built image with [key-value metadata]({{site.baseurl}}/docs/docker-registries/metadata-annotations/). | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | +| `buildkit` | Set to `true` to enable [Buildkit]({{site.baseurl}}/docs/pipelines/steps/build/#buildkit-support) and all of its enhancements | Optional | + +**Exported resources:** +- Working Directory +- Image ID + +## Examples + +Build an image using a Dockerfile in the root project folder: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build +{% endhighlight %} + +Build an image using a different Dockerfile and a specific version tag + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + dockerfile: my-custom.Dockerfile + tag: 1.0.1 +{% endhighlight %} + +Build an image using a different Dockerfile and push multiple tags to the default registry. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + dockerfile: my-custom.Dockerfile + tags: + - latest + - ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}} + - v1.1 +{% endraw %} +{% endhighlight %} + +Build an image and automatically push to the [registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) with name `my-registry`. + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + type: build + image_name: my-app-image + dockerfile: my-custom.Dockerfile + tag: 1.0.1 + registry: my-registry +{% endhighlight %} + +Build two images in two different folders using [Codefresh variables]({{site.baseurl}}/docs/pipelines/variables/) as tags. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildNodeImage: + title: Building My Node app + type: build + image_name: my-department/my-team/my-node-image + dockerfile: Dockerfile + working_directory: ./project1 + tag: ${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}} + BuildGoImage: + title: Building My Go app + type: build + image_name: my-company/my-go-image + dockerfile: Dockerfile + working_directory: ./project2 + tag: ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}} +{% endraw %} +{% endhighlight %} + +It also possible to build Docker images in [parallel]({{site.baseurl}}/docs/pipelines/advanced-workflows/) for faster builds. 
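+
+For example, the two build steps shown above could be wrapped in a parallel step. This is only a sketch (the full syntax is documented in the advanced workflows page):
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  build_my_images:
+    type: parallel
+    steps:
+      BuildNodeImage:
+        title: Building My Node app
+        type: build
+        image_name: my-department/my-team/my-node-image
+        dockerfile: Dockerfile
+        working_directory: ./project1
+        tag: ${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}
+      BuildGoImage:
+        title: Building My Go app
+        type: build
+        image_name: my-company/my-go-image
+        dockerfile: Dockerfile
+        working_directory: ./project2
+        tag: ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}
+{% endraw %}
+{% endhighlight %}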
+ +### Inline Dockerfile + +If your project does not already have a Dockerfile, you can also define one within the pipeline: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: my-own-go-app + working_directory: ./ + tag: '${{CF_BRANCH_TAG_NORMALIZED}}' + dockerfile: + content: |- + # --- + # Go Builder Image + FROM golang:1.8-alpine AS builder + # set build arguments: GitHub user and repository + ARG GH_USER + ARG GH_REPO + # Create and set working directory + RUN mkdir -p /go/src/github.com/$GH_USER/$GH_REPO + # copy file from builder image + COPY --from=builder /go/src/github.com/$GH_USER/$GH_REPO/dist/myapp + /usr/bin/myapp + CMD ["myapp", "--help"] +{% endraw %} +{% endhighlight %} + +Use this technique only as a last resort. It is better if the Dockerfile exists as an actual file in source control. + + +## Automatic pushing + +All images built successfully with the build step, will be automatically pushed to the default Docker registry in your account. This behavior is completely automatic and happens without any extra configuration on your part. If you want to disable this then add the `disable_push` property in your build step. + +>Notice that the [push step]({{site.baseurl}}/docs/pipelines/steps/push/) in Codefresh is optional and is only needed if you want to push to [external Docker registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). + +{% + include image.html + lightbox="true" + file="/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png" + url="/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png" + alt="Docker Images pushed automatically" + caption="Docker Images pushed automatically" + max-width="80%" +%} + +## Buildkit support + +Codefresh also allows you to use [buildkit](https://github.com/moby/buildkit) with all its [enhancements](https://docs.docker.com/develop/develop-images/build_enhancements/) and [experimental features](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#experimental-syntaxes). + +Using buildkit you can get: + +* Improved build output logs +* Mounting of external secrets that will never be stored in the image +* Access to SSH keys and sockets from within the Dockerfile +* Use cache and bind-mounts at build time + +These capabilities are offered as extra arguments in the build step and using any of them will automatically enable buildkit. You can utilize the different mount-options for the Dockerfile instruction `RUN` as long as buildkit is enabled for your build step. Mounts of type [`cache`](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#example-cache-go-packages) work out of the box and are persisted between pipeline runs. + +The simplest way to use buildkit is by enabling it explicitly: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + buildkit: true +{% endhighlight %} + +Buildkit is also automatically enabled if you use any of its features such as the `progress` property: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + progress: tty +{% endhighlight %} + +Possible values for `progress` are `tty` and `plain`. 
+ +For secrets you can either mention them in a single line: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + secrets: + - id=secret1,src=./my-secret-file1.txt + - id=secret2,src=./my-secret-file2.txt +{% endhighlight %} + +or multiple lines: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + secrets: + - id: secret1 + src: ./my-secret-file1.txt + - id: secret2 + src: ./my-secret-file2.txt +{% endhighlight %} + +For the SSH connection you can either use the default: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + ssh: default +{% endhighlight %} + + +or define different keys: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + ssh: + - github=~/.ssh/github_rsa + - bitbucket=~/.ssh/bitbucket_rsa +{% endhighlight %} + +You might want to use an environment variable to store and retrieve a ssh key. This can be achieved by converting you ssh key into a one-line string: +``` +tr '\n' ',' < /path/to/id_rsa +``` + +Copy the output and place it an [environment variable]({{site.baseurl}}/docs/pipelines/variables/#user-provided-variables). To make the SSH key availabe to the build step, you can write it to the codefresh volume: +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + SetupSshKeys: + title: Setting up ssh key + image: alpine:latest + commands: + - mkdir /codefresh/volume/keys + - echo "${SSH_KEY}" | tr ',' '\n' > /codefresh/volume/keys/github_rsa + + BuildMyImage: + title: Building My Docker image + image_name: my-app-image + type: build + tag: latest + ssh: + - github=/codefresh/volume/keys/github_rsa +{% endraw %} +{% endhighlight %} + + +You can combine all options (`ssh`, `progress`, `secrets`) in a single build step if desired. + + + +## Related articles +[Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) diff --git a/_docs/pipelines/steps/composition.md b/_docs/pipelines/steps/composition.md new file mode 100644 index 00000000..40764e68 --- /dev/null +++ b/_docs/pipelines/steps/composition.md @@ -0,0 +1,434 @@ +--- +title: "Composition step" +description: "Run a Docker container with its dependencies inside a pipeline" +group: pipelines +sub_group: steps +redirect_from: + - /docs/composition-1/ + - /docs/codefresh-yaml/steps/composition-1/ +toc: true +--- +The composition step runs a Docker Composition as a means to execute finite commands in a more complex interaction of services. + +>Note that while composition steps are still supported, the recommended way to run integrations tests going forward is with [service containers]({{site.baseurl}}/docs/codefresh-yaml/service-containers/). + +## Motivation for Compositions + +The primary purpose of compositions is to run tests that require multiple services for their execution (often known as integration tests). + +The syntax offered by Codefresh closely follows the syntax for [Docker-compose](https://docs.docker.com/compose/overview/) files, but is technically not 100% the same (there are some important differences). 
However, if you are already familiar with Docker compose, you will be immediately familiar with Codefresh compositions. + +> Codefresh only understands Docker compose versions [2](https://docs.docker.com/compose/compose-file/compose-file-v2/) and [3](https://docs.docker.com/compose/compose-file/), but not point releases such as 2.1. + +The big difference between the Codefresh and Docker compose is that Codefresh is distinguishes between two kinds of services: + +* Composition Services +* Composition Candidates + +**Composition Services** are helper services that are needed for the tests to run. These can be a database, a queue, a cache, or the backend docker image of your application -- these closely parallel the services that you might define in Docker compose. + +**Composition Candidates** are special services that will execute the tests. Codefresh will monitor their execution and the build will fail if they do not succeed. Composition candidates are almost always Docker images that contain unit/integration tests or other kinds of tests (e.g. performance) + +You need at least one composition service and one candidate for the composition step. + + +## Usage + +Here is an example of a composition step. Note that there is one composition service (PostgreSQL database, named `db`) and one composition candidate (tests executed with gulp) + +The most important part is the `command` line that executes the tests: `command: gulp integration_test`. If it fails, then the whole composition step will fail. + + + + `codefresh.yml` +{% highlight yaml %} +step_name: + type: composition + title: Step Title + description: Free text description + working_directory: {% raw %}${{a_clone_step}}{% endraw %} + composition: + version: '2' + services: + db: + image: postgres + composition_candidates: + test_service: + image: {% raw %}${{build_step}}{% endraw %} + command: gulp integration_test + working_dir: /app + environment: + - key=value + composition_variables: + - key=value + fail_fast: false + when: + condition: + all: + notFeatureBranch: 'match("{% raw %}${{CF_BRANCH}}{% endraw %}", "/FB-/", true) == false' + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Caveats on sharing a docker-compose.yml + +Although Codefresh's composition syntax closely follows the syntax used in `docker-compose.yml` files, it is not 100% the same. If you are using `docker-compose.yml` locally, you may experience some problems if you try to have Codefresh reference the file (by passing it as an argument to `compose`, e.g. `compose: docker-compose.yml`). + +One subtle difference is that Docker compose will interpolate environment variables that are quoted in single-braces, e.g. `${DATABASE_URL}`, whereas Codefresh interpolates variables that are quoted in double-braces, e.g. {% raw %}`${{DATABASE_URL}}`{% endraw %}. So if your `docker-compose.yml` file relies on the parsing of ENV variables, it may not be a good candidate for sharing with Codefresh. 
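+
+As a small illustration of this difference (the service and variable names here are placeholders, not part of a real composition):
+
+{% highlight yaml %}
+{% raw %}
+# In a local docker-compose.yml, Docker Compose would interpolate:
+#   environment:
+#     - DATABASE_URL=${DATABASE_URL}
+#
+# In a Codefresh composition, the equivalent reference uses double braces:
+composition:
+  version: '2'
+  services:
+    app:
+      image: my-app
+      environment:
+        - DATABASE_URL=${{DATABASE_URL}}
+{% endraw %}
+{% endhighlight %}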
+ +## Fields + +The following describes the fields available in a step of type `composition` + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `working_directory` | The directory in which to search for the composition file. It can be an explicit path in the container's file system, or a variable that references another step. The default is {% raw %}`${{main_clone}}`{% endraw %}. Note that this is completely different from `working_dir` which is on the service level. | Default | +| `composition` | The composition you want to run. This can be an inline YAML definition or a path to a composition file on the file system, e.g. `docker-compose.yml`, or the logical name of a composition stored in the Codefresh system. We support most features of [Docker compose version 2.0](https://docs.docker.com/compose/compose-file/compose-file-v2/) and [3.0](https://docs.docker.com/compose/compose-file/) | Required | +| `version` | Version for docker compose. Use `2` or `3` | Required | +| `composition_candidates` | The definition of the service to monitor. Each candidate has a **single** `command` parameter that decides what will be tested. | Required | +| `environment` (service level) | environment that will be accessible to the container | Optional | +| `working_dir` (service level) | defines the working directory that will be used in a service before running a command. By default it is defined by the docker image that is used by the service. | Optional | +| `registry_contexts` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +| `volumes` (service level) | Extra volumes for individual services. Used for transferring information between your steps. Explained in detail later in this page. | Optional | +| `composition_variables` | A set of environment variables to substitute in the composition. Notice that these variables are docker-compose variables and **NOT** environment variables | Optional | +| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default | +| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
                                                        You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +## Composition versus Composition Candidates + +For Codefresh to determine if the step and operations were successfully executed, you must specify at least one `composition_candidate`. + +A `composition_candidate` is a single service component of the normal Docker composition that is monitored for a successful exit code and determines the outcome of the step. During runtime, the `composition_candidate` is merged into the specified `composition`and is monitored for successful execution. + +The critical part of each candidate is the `command` parameter. This takes [a single command](https://docs.docker.com/compose/compose-file/#command) that will +be executed inside the Docker container of the candidate and will decide if the whole composition is successful or not. Only one command is allowed (similar to Docker compose). If you wish to test multiple commands you need to connect them with `&&` like this. + +{% highlight yaml %} + composition_candidates: + my_unit_tests: + image: node + command: bash -c "sleep 60 && pwd && npm run test" +{% endhighlight %} + + +## Working directories in a composition + +By default, all services that take part in a composition will use as working directory the one defined by the respective image. If you want to change that, you need to use the `working_dir` parameter at the service level. + +Here is an example: + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_composition: + type: composition + title: Sample composition + composition: + version: '2' + services: + my_service: + image: alpine + command: 'pwd' + working_dir: /tmp + composition_candidates: + my_test_service: + image: python + working_dir: /root + command: 'pwd' +{% endhighlight %} + +If you run this composition, you will see in the logs that the alpine image will use `/tmp` as a working directory and the python one will use `/root` + +``` +my_service_1 | /tmp +my_test_service_1 | /root +``` + +## Composition networking + +The networking in Codefresh compositions works just like normal Docker-compose. Each service is assigned a hostname that matches +its name and is accessible by other services. + +Here is an example + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + build_step: + type: build + image_name: my-node-app + dockerfile: Dockerfile + tag: ${{CF_BRANCH}} + my_db_tests: + type: composition + composition: + version: '2' + services: + db: + image: mysql:latest + ports: + - 3306 + environment: + MYSQL_ROOT_PASSWORD: admin + MYSQL_USER: my_user + MYSQL_PASSWORD: admin + MYSQL_DATABASE: nodejs + composition_candidates: + test: + image: ${{build_step}} + links: + - db + command: bash -c 'sleep 30 && MYSQL_ROOT_PASSWORD=admin MYSQL_USER=my_user MYSQL_HOST=db MYSQL_PASSWORD=admin MYSQL_DATABASE=nodejs npm test' +{% endraw %} +{% endhighlight %} + +In this composition the MySql instance will be available at host `db:3306` accessible from the node image. 
When the node tests run, they will be pointed to that host and port combination to access it. + +Notice also that like docker compose the order that the services are launched is not guaranteed. A quick way to solve this issue +is with a sleep statement like shown above. This will make sure that the database is truly up before the tests run. + +A better approach would be to use solutions such as [wait-for-it](https://github.com/vishnubob/wait-for-it) which are much more robust. Here is an example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + build_image: + type: build + description: Building the image... + image_name: my-spring-boot-app + tag: ${{CF_BRANCH_TAG_NORMALIZED}} + build_image_with_tests: + type: build + description: Building the Test image... + image_name: maven-integration-tests + dockerfile: Dockerfile.testing + integration_tests: + type: composition + title: Launching QA environment + description: Temporary test environment + composition: + version: '2' + services: + app: + image: ${{build_image}} + ports: + - 8080 + composition_candidates: + test_service: + image: ${{build_image_with_tests}} + links: + - app + command: bash -c '/usr/bin/wait-for-it.sh -t 20 app:8080 -- mvn verify -Dserver.host=app' +{% endraw %} +{% endhighlight %} + +In this composition a Java application is launched at `app:8080` and then a second image is used for integration tests that target that URL (passed as a parameter to Maven). + +The `wait-for-it.sh` script will make sure that the Java application is truly up before the tests are started. Notice that in the example above the script is included in the testing image (created by `Dockerfile.testing`) + +## Using public Docker images in a composition + +It is important to notice that Docker images used in a composition (both as services and candidates) will be looked from your connected registries first before looking at Dockerhub: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + my_composition: + type: composition + title: Sample composition + composition: + version: '2' + services: + my_service: + image: mysql + ports: + - 3306 + composition_candidates: + my_test_service: + image: alpine + working_dir: /root + command: 'pwd' + +{% endraw %} +{% endhighlight %} + +In the example above if you already have two images in your private registries named `mysql` and `alpine`, then *THEY* will be used instead of the respective images in Dockerhub. + +You can see which images are used in the logs of the builds: + +``` +Running composition step: Sample composition +Pulling kostisazureregistry.azurecr.io/mysql@sha256:1ee5515fed3dae4f13d0f7320e600a38522fd7e510b225e68421e1f90 +Pulling kostisazureregistry.azurecr.io/alpine@sha256:eddb7866364ec96861a7eb83ae7977b3efb98e8e978c1c9277262d327 +``` + + +## Accessing your project folder from a composition + +By default, the services of a composition run in a completely isolated manner. 
There are several scenarios however where you wish to access your Git files such as: + +* Using test data that is available in the project folder +* Preloading a database with a data script found in Git +* Running integration tests and then using their [results for reporting]({{site.baseurl}}/docs/testing/test-reports/) + +The Codefresh [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) is automatically mounted in [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/) but **NOT** in compositions. You have to mount it yourself if you use that functionality. + +Here is an example where the shared volume is mounted in a composition -- {% raw %}`'${{CF_VOLUME_NAME}}:${{CF_VOLUME_PATH}}'`{% endraw %} is listed under `volumes`: + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + create_test_data_step: + title: Creating dummy data + image: alpine + commands: + - echo "Writing in shared volume" > /codefresh/volume/sample_text.txt + my_sample_composition: + type: composition + title: Composition with volume + composition: + version: '2' + services: + my_sample_service: + image: node + volumes: + - '${{CF_VOLUME_NAME}}:${{CF_VOLUME_PATH}}' + working_dir: '${{CF_VOLUME_PATH}}' + command: bash -c "pwd && cat sample_text.txt" + composition_candidates: + my_unit_tests: + image: python + volumes: + - '${{CF_VOLUME_NAME}}:${{CF_VOLUME_PATH}}' + working_dir: '${{CF_VOLUME_PATH}}' + command: bash -c "pwd && echo 'Finished tests' > test_result.txt" + read_test_data_step: + title: Reading dummy data + image: alpine + commands: + - ls -l /codefresh/volume + - cat /codefresh/volume/test_result.txt +{% endraw %} +{% endhighlight %} + +In this pipeline: + +1. The first freestyle step writes a simple test file in the shared volume. +1. The composition starts and both services (`my_sample_service` and `my_unit_tests`) attach the same volume. +1. The sample service reads from the shared volume (i.e. using test data that was created before). +1. The sample unit test service writes to the shared volume (emulating test results). +1. The last freestyle step reads the file that was written by the composition. + +Therefore, in this pipeline you can see both ways of data sharing, bringing files into a composition and getting results out of it. Notice that we need to mount the shared volume only in the composition services. The freestyle steps automatically mount `/codefresh/volume` on their own. + + +>Note: In order to mount the shared volume in one of your composition services, you must mount it in the `composition_candidate` also. It is not compulsory to mount the shared volume in all services of a composition. Only those that actually use it for file transfer, should mount it. + + +## Composition variables versus environment variables + +Docker compose supports [two kinds of variables in its syntax](https://docs.docker.com/compose/environment-variables/): + +* There are environment variables that are used in the docker-compose file itself (`${VAR}` syntax). +* There are environment variables that are passed in containers (`environment:` yaml group). + +Codefresh supports both kinds, but notice that variables mentioned in the +`composition_variables` yaml group refer to the *first* kind. Any variables defined there are **NOT** passed automatically to containers (use the `environment` yaml group for that purpose). 
+ +This can be illustrated with the following example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + comp1: + type: composition + title: Composition example 1 + description: Free text description + composition: + version: '2' + services: + db: + image: alpine + composition_candidates: + test_service: + image: alpine + command: printenv + environment: + - FIRST_KEY=VALUE + composition_variables: + - ANOTHER_KEY=ANOTHER_VALUE +{% endraw %} +{% endhighlight %} + +If you run the compositio,n you will see that the `printenv` command shows the following: + +``` +test_service_1 | FIRST_KEY=VALUE +``` + +The `FIRST_KEY` variable which is defined explicitly in the `environment` yaml part is correctly passed to the alpine container. The `ANOTHER_KEY` is not visible in the container at all. + +You should use the `composition_variables` yaml group for variables that you wish to reuse in other parts of your composition using the `${ANOTHER_KEY}` syntax. + +## Merging services + +If the `composition` already contains a service with the same name as the `composition_candidate`, the two service definitions are combined, with preference given to the `composition_candidate`'s definition. + +For example, we create a new Codefresh composition named 'test_composition': + + `test-composition.yml` +{% highlight yaml %} +version: '2' + services: + db: + image: postgres + test_service: + image: myuser/mytestservice:latest + command: gulp integration_test +{% endhighlight %} + +Now we want to reuse this composition during our build for testing purposes. +We can add the following composition step to our `codefresh.yml` file and define the composition step so that `test_service` always uses the latest image that was built. + + `YAML` +{% highlight yaml %} +run_tests: + type: composition + composition: test_composition + composition_candidates: + test_service: + image: {% raw %}${{build_step}}{% endraw %} +{% endhighlight %} + +In the above example, both `composition` and `composition_candidates` define a service named `test_service`. After merging these definitions, `test_service` will maintain the `command` that was defined in the original composition but will refer to the image built by the step named `build_step`. + +## Related articles +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) +[Variables]({{site.baseurl}}/docs/pipelines/variables/) +[Introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/) + diff --git a/_docs/pipelines/steps/deploy.md b/_docs/pipelines/steps/deploy.md new file mode 100644 index 00000000..6ad0a4e38 --- /dev/null +++ b/_docs/pipelines/steps/deploy.md @@ -0,0 +1,185 @@ +--- +title: "Deploy" +description: "Deploying to Kubernetes from a Codefresh pipeline" +group: codefresh-yaml +sub_group: steps +redirect_from: + - /docs/deploy/ +toc: true +--- +The *Deploy* step can be used as a step to deploy a pre-built Docker image to a cluster + +This step allows to (re)deploy a Kubernetes application in your cluster + +It has two modes: + +1. Using the `service` option. In this case it will redeploy to an [existing service/deployment in your cluster]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) . Codefresh will +automatically update the service/deployment with the new docker image. +1. Using the `file_path` option. In this case you provide your own Kubernetes manifest and Codefresh deploys it as-is. 
It is **your +own responsibility** to do [custom replacements]({{site.baseurl}}/docs/deploy-to-kubernetes/kubernetes-templating/) here (for example using [awk](https://en.wikipedia.org/wiki/AWK), [sed](https://www.gnu.org/software/sed/manual/sed.html) or [yq](http://mikefarah.github.io/yq/)). The deploy step is also using the [Codefresh templating mechanism]({{site.baseurl}}/docs/deploy-to-kubernetes/kubernetes-templating/#using-the-codefresh-deploy-image) behind the scenes if you want to take advantage of it. For a full templating solution we also +suggest you look at [Helm]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/). + +You need to define either one of these fields in the deploy step. If you define `service` you also can select the exact Docker image +with the `candidate` field (otherwise Codefresh will just reuse the docker image defined in the existing deployment) + +## Usage + + `YAML` +{% highlight yaml %} + step_name: + title: deploying to cluster + type: deploy + kind: kubernetes + ## cluster name as the shown in account's integration page + cluster: --my-cluster-name-- + # desired namespace + namespace: default + + ## Two ways to distinguish which deployment YAML to deploy - service or file_path: + # The Kubernetes service that associated with the deployment using selector + service: --my-service-- + # Path to deployment.yml location inside the image volume + file_path: ./deployment.yml + # In seconds, how long the step will wait until the rolling update is complete (default is 120) + timeout: '150' + # Candidate is optional, if not specified will redeploy the same image that specified in the deployment file + # When candidate exists it should have both: image and registry + candidate: + # The image that will replace the original deployment image + # The image that been build using Build step + image: {% raw %}${{build_step}}{% endraw %} + # The registry that the user's Kubernetes cluster can pull the image from + # Codefresh will generate (if not found) secret and add it to the deployment so the Kubernetes master can pull it + registry: dockerhub + # Condition to run the step + when: + branch: + only: + - master + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | ---------------------------------------------------------- -------- | ------------------------- | +| `title` | The free-text display name of the step | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/codefresh-yaml/stages/) for more information. | Optional | +| `kind` | Currently only `kubernetes` is supported | Required | +| `cluster` | Name of your K8s cluster as found in the dashboard | Required | +| `namespace` | Namespace where the deployment will take place | Required | +| `service` | Name of the existing service that will updated. You need to provide `service` OR `file_path` | Required/Optional | +| `file_path` | A deployment manifest. You need to provide `service` OR `file_path` | Required/Optional | +| `timeout` | Seconds to wait for the deployment to be completed. Default is 120 seconds | Default | +| `candidate` | Docker image that will be deployed. Only valid if `service` is defined. 
Should contain `image` and the name of the registry as it appears in the [registry integration page]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). | Optional |
+| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional |
+| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional |
+| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional |
+
+## Examples
+
+Update an existing service using the same Docker image (tagged with branch)
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  MyAppDockerImage:
+    title: Building Docker Image
+    type: build
+    image_name: python-flask-sample-app
+    working_directory: ./
+    tag: ${{CF_BRANCH_TAG_NORMALIZED}}
+    dockerfile: Dockerfile
+  deploy_to_k8:
+    title: deploying to cluster
+    type: deploy
+    kind: kubernetes
+    cluster: myDemoAKSCluster
+    namespace: demo
+    service: my-python-app
+{% endraw %}
+{% endhighlight %}
+
+Update an existing service using a different Docker image (tagged with git hash)
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  MyAppDockerImage:
+    title: Building Docker Image
+    type: build
+    image_name: python-flask-sample-app
+    working_directory: ./
+    tag: ${{CF_SHORT_REVISION}}
+    dockerfile: Dockerfile
+  deploy_to_k8:
+    title: deploying to cluster
+    type: deploy
+    kind: kubernetes
+    cluster: myDemoAKSCluster
+    namespace: demo
+    service: my-python-app
+    candidate:
+      # The image that will replace the original deployment image
+      # The image that was built using the Build step
+      image: ${{MyAppDockerImage}}
+      # The registry that the user's Kubernetes cluster can pull the image from
+      # Codefresh will generate a secret (if not found) and add it to the deployment so the Kubernetes master can pull the image
+      registry: cfcr
+{% endraw %}
+{% endhighlight %}
+
+
+Deploy a custom Kubernetes manifest as-is.
(Only a deployment will be created) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: python-flask-sample-app + working_directory: ./ + tag: ${{CF_BRANCH}} + dockerfile: Dockerfile + deploy_to_k8: + title: deploying to cluster + type: deploy + kind: kubernetes + cluster: myDemoAKSCluster + namespace: demo + file_path: ./deploy/deployment.yml +{% endraw %} +{% endhighlight %} + +## Advanced Kubernetes deployments + +If you find the deploy step limited, feel free to look at the other deployment options offered by Codefresh: + +* [The cf-deploy-kubernetes step]({{site.baseurl}}/docs/deploy-to-kubernetes/kubernetes-templating/) +* [Custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/) +* [Helm]({{site.baseurl}}/docs/getting-started/helm-quick-start-guide/) + +## Related articles +[Kubernetes Quick start guide]({{site.baseurl}}/docs/getting-started/deployment-to-kubernetes-quick-start-guide/) +[Manage your Kubernetes cluster]({{site.baseurl}}/docs/deploy-to-kubernetes/manage-kubernetes/) +[Install HELM chart using Codefresh pipeline]({{site.baseurl}}/docs/new-helm/using-helm-in-codefresh-pipeline/) + + + diff --git a/_docs/pipelines/steps/freestyle.md b/_docs/pipelines/steps/freestyle.md new file mode 100644 index 00000000..4610bd95 --- /dev/null +++ b/_docs/pipelines/steps/freestyle.md @@ -0,0 +1,352 @@ +--- +title: "Freestyle" +description: "Run commands inside a Docker container" +group: codefresh-yaml +sub_group: steps +redirect_from: + - /docs/freestyle/ +toc: true +--- +The Freestyle step is designed so you can execute a series of commands in a container. Freestyle steps +are the bread and butter of [Codefresh pipelines]({{site.baseurl}}/docs/configure-ci-cd-pipeline/introduction-to-codefresh-pipelines/). + +## Purpose of freestyle steps + +In Codefresh, docker containers are first-class citizens +and special typed steps are offered for the most usual docker commands. Freestyle steps are a secure replacement for `docker run` commands. + + +Therefore, this command on your local workstation: + +``` +docker run python:3.6.4-alpine3.6 pip install . +``` + +will become in Codefresh the following freestyle step. + +```yaml +CollectAllMyDeps: + title: Install dependencies + image: python:3.6.4-alpine3.6 + commands: + - pip install . +``` + + +Select an image to start a container, then you can specify a working directory, and commands. +If you do not specify a working directory or commands, the step runs the organic commands specified by the image. +In all freestyle steps Codefresh automatically [uses a shared docker volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) that contains your git source code. + +## Usage + + `YAML` +{% highlight yaml %} +{% raw %} +step_name: + title: Step Title + description: Step description + image: image/id + working_directory: ${{step_id}} + commands: + - bash-command1 + - bash-command2 + cmd: + - arg1 + - arg2 + environment: + - key=value + entry_point: + - cmd + - arg1 + shell: sh + fail_fast: false + volumes: + - ./relative-dir-under-cf-volume1:/absolute-dir-in-container1 + - ./relative-dir-under-cf-volume2:/absolute-dir-in-container2 + when: + branch: + only: [ master ] + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... 
+{% endraw %} +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional | +| `image` | The image from which the executable container is created. It can be an explicit ID of a Docker image, or a variable that references a **Build** or **Push** step. | Required | +| `working_directory` | The directory from which the commands are executed. It can be an explicit path in the container's file system, or a variable that references another step. The default `working_directory` is the cloned repository directory and not the working directory specified by the image. If you need to use the default working directory of the image use `IMAGE_WORK_DIR`. | Default | +| `commands` | One or more commands to execute in a shell in the container, as array of strings. | Optional | +| `cmd` | docker CMD arguments to use along with the container entry point. can be string or array of strings. | Optional | +| `entry_point` | Override the default container entry point. can be string or array of strings. | Optional | +| `shell` | Explicitly set the executing shell to bash or sh. If not set the default will be sh. Note the `bash` option requires that you specify an `image` that includes `/bin/bash`; many images do not. | Optional | +| `environment` | A set of environment variables for the container. | Optional | +| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default | +| `registry_context` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +| `volumes` | One or more volumes for the container. All volumes must be mounted from the existing shared volume (see details below) |Optional +| `when` | Define a set of conditions that need to be satisfied in order to execute this step. You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +**Exported resources:** +- Working Directory. + +## Examples + +Here are some full pipelines with freestyle steps. 
Notice that in all cases the pipelines are connected to [git repositories]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-creation-modes) +so the source code is already checked out and available to all pipeline steps. + +**Creating a [JAR file]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/):** + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_jar_compilation: + title: Compile/Unit test + image: maven:3.5.2-jdk-8-alpine + commands: + - mvn -Dmaven.repo.local=/codefresh/volume/m2_repository package +{% endhighlight %} + +Note how we [cache Maven dependencies]({{site.baseurl}}/docs/learn-by-example/java/spring-boot-2/#caching-the-maven-dependencies) using the internal Codefresh Volume. + +**Running unit tests in [Node.JS]({{site.baseurl}}/docs/learn-by-example/nodejs/):** + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_node_app: + title: Running unit tests + image: node:11 + commands: + - npm install + - npm run test +{% endhighlight %} + +**Packaging a [GO application]({{site.baseurl}}/docs/learn-by-example/golang/golang-hello-world/):** + +`codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + my_go_app: + title: Compiling GO code + image: golang:1.7.1 + commands: + - go get github.com/example-user/example-repo + - go build +{% endhighlight %} + +**Performing a [blue/green deployment](https://github.com/codefresh-io/k8s-blue-green-deployment):** + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + blueGreenDeploy: + title: Deploying new version + image: codefresh/k8s-blue-green:master + environment: + - SERVICE_NAME=my-demo-app + - DEPLOYMENT_NAME=my-demo-app + - NEW_VERSION=${{CF_SHORT_REVISION}} + - HEALTH_SECONDS=60 + - NAMESPACE=colors + - KUBE_CONTEXT=myDemoAKSCluster +{% endraw %} +{% endhighlight %} + +## Dynamic freestyle steps + +Codefresh has the unique ability to allow you to run freestyle steps in the context of a docker image +created on the same pipeline. This means that you can dynamically [create docker images]({{site.baseurl}}/docs/pipelines/steps/build/) on demand within the pipeline +that needs them. + +Creating a custom docker image with extra tools (Terraform and Ansible) + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + CreateMyCustomImage: + title: Creating custom Docker image + type: build + dockerfile: tf_and_ansible.Dockerfile + image_name: my-iac-tools-container + UseMyCustomImage: + title: Running IAC tools + image: ${{CreateMyCustomImage}} + commands: + - terraform --version + - ansible --version +{% endraw %} +{% endhighlight %} + +Here the `UseMyCustomImage` freestyle step is running in the [context]({{site.baseurl}}/docs/pipelines/variables/#context-related-variables) of the Docker image that was created in the previous step. +In fact, a very common pattern that you will see in Codefresh pipelines is the executions of [unit tests]({{site.baseurl}}/docs/testing/unit-tests/) in the image that was created in a build step: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - ./my-unit-tests.sh +{% endraw %} +{% endhighlight %} + +Here the `MyAppDockerImage` step is creating a custom docker image. That image is used to run the `MyUnitTests` step. 
+This pattern works very well for cases where testing tools are already part of the image (usually with dynamic languages).
+In other cases, you can have a second Dockerfile in your application that is designed explicitly to hold all your testing tools.
+
+## Entry point
+
+When using the original container entry point, you can use the `cmd` field to specify additional arguments to be used with the entry point. This can be a string, or an array of strings. For example:
+
+```yaml
+image: mwendler/cowsay
+cmd:
+  - "Hello"
+```
+
+is equivalent to running `docker run mwendler/cowsay Hello` which is equivalent to running `cowsay Hello` inside the container.
+
+
+You can override the container's default entry point using the `entry_point` field. This can be a string, or an array of strings. For example:
+
+```yaml
+image: mwendler/cowsay
+entry_point:
+  - echo
+  - Hello
+```
+
+## Commands
+
+When you use the `commands` field, it will override the container's original `entry_point` and will execute the commands in a shell inside the container.
+The provided commands are concatenated into a single command using the shell's `;` operator, and are run using the default shell `/bin/sh` as an entry point.
+Additional settings that are set only when using commands are `set -e`, and the [`cf_export`]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) utility.
+
+> Using complex commands in the freestyle step requires use of [YAML block scalars](http://stackoverflow.com/questions/3790454/in-yaml-how-do-i-break-a-string-over-multiple-lines).
+
+### Commands and Entry point
+
+If you want to retain the original entry point, do not use the `commands` field.
+
+However, this example:
+
+```yaml
+image: mwendler/cowsay
+commands:
+  - "Hello"
+```
+
+will cause an error because the engine will attempt to run the command `Hello` in a shell inside the container, and the command `Hello` is not a valid command.
+In order to use the `commands` form with an `entrypoint`-enabled container, you can add the commands from the entry point to the list of commands, like so:
+
+```yaml
+image: mwendler/cowsay
+commands:
+  - cowsay "Hello"
+```
+
+## Custom volumes
+
+If you are familiar with [Codefresh pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) you should know that all freestyle steps automatically share a [volume](https://docs.docker.com/storage/) mounted at `/codefresh/volume` which can be used to transfer data (e.g. dependencies and test results) from each step to the next.
+
+**This volume is automatically mounted by Codefresh and needs no configuration at all**. All you have to do to access it is read/write the `/codefresh/volume` folder from your application. This folder also [includes by default the source code]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) of the git repository connected to the pipeline (at the `/codefresh/volume/` subfolder).
+
+You can use the `volumes` property to create your own custom volumes that can be mounted in different folders. **For security reasons, however, all source volume data (i.e. the "host" folder) still needs to be bound with `/codefresh/volume` or any of its subdirectories.**
+
+Attempting to mount a folder outside of `/codefresh/volume` will result in an error.
+
+### Simple volume example
+
+Let's assume that your application expects to find a configuration folder at `/config`.
The folder however that contains the needed files in GIT is under `my-app-repo/my-sample-config`. When the application is checked out the files actually reside at `/codefresh/volume/my-app-repo/my-sample-config`. + +You can still run your application without any code changes by doing the following bind: + +```yaml +title: Running my application with custom volume +image: my-docker-app:latest +volumes: + - ./my-app-repo/my-sample-config:/config # host path is relative to /codefresh/volume +``` + +Now the `my-docker-app` application will run and find all its needed files at `/config`. + +Notice that we use a relative path here but even if you used an absolute one (`/my-app/my-sample-config`) the result would be the same because Codefresh does not allow you to bind anything outside the shared Codefresh volume. + +### Injecting custom folders in a running container + +Here is another example pipeline with two steps. The first one creates a custom config file in the shared Codefresh volume (that is always available) at `/codefresh/volume/my-config`. The second step reads the config file at a different folder in `/my-own-config-folder-injected`. + +```yaml +version: '1.0' +steps: + CreateCustomConfiguration: + title: Creating configuration + image: alpine + commands: + - mkdir -p /codefresh/volume/my-config + - echo "foo=bar" > /codefresh/volume/my-config/custom.txt + - ls /codefresh/volume/my-config + InjectConfiguration: + title: Reading configuration + image: alpine + commands: + - ls /codefresh/volume/my-config # Codefresh default volume shared between all steps + - ls /my-own-config-folder-injected # Special volume just for this container + - cat /my-own-config-folder-injected/custom.txt + volumes: + - ./my-config:/my-own-config-folder-injected +``` + +When the second steps runs, the `custom.txt` file is available both at `/codefresh/volume/my-config` (the shared volume of all steps) as well as the `/my-own-config-folder-injected` folder which was mounted specifically for this step. + + +## More freestyle steps + +You can use in a freestyle step any Docker image available in a public repository such as Dockerhub. This makes the integration of Codefresh and various cloud tools very easy. + +Codefresh also offers a plugin directory at [http://codefresh.io/steps/](http://codefresh.io/steps/) created specifically for CI/CD operations. + +{% include +image.html +lightbox="true" +file="/images/pipeline/plugin-directory.png" +url="/images/pipeline/plugin-directory.png" +alt="Codefresh steps directory" +caption="Codefresh steps directory" +max-width="80%" +%} + + +## Related articles +[Introduction to Pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) +[Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) +[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/) + diff --git a/_docs/pipelines/steps/git-clone.md b/_docs/pipelines/steps/git-clone.md new file mode 100644 index 00000000..017e3213 --- /dev/null +++ b/_docs/pipelines/steps/git-clone.md @@ -0,0 +1,438 @@ +--- +title: "Git-Clone" +description: "Checkout code in your pipelines" +group: pipelines +sub_group: steps +redirect_from: + - /docs/git-clone/ +toc: true +--- +Clones a Git repository to the filesystem. + +A pipeline can have any number of Git clone steps (even none). You can checkout code from any private or public repository. Cloning a repository is not constrained to the trigger of a pipeline. 
You can trigger a pipeline from a commit that happened on Git repository A while the pipeline is checking out code from Git repository B.
+
+>Notice that if you are an existing customer from before May 2019, Codefresh will automatically check out the code from a [connected git repository]({{site.baseurl}}/docs/integrations/git-providers/) when a pipeline is created on that repository. In this case an implicit git clone step is included in your pipeline. You can still override it with your own git clone step as explained in this page.
+
+## Usage
+
+  `YAML`
+{% highlight yaml %}
+step_name:
+  type: git-clone
+  title: Step Title
+  description: Step description
+  working_directory: /path
+  repo: owner/repo
+  git: my-git-provider
+  revision: abcdef12345
+  use_proxy: false
+  credentials:
+    username: user
+    password: credentials
+  fail_fast: false
+  when:
+    branch:
+      ignore: [ develop ]
+  on_success:
+    ...
+  on_fail:
+    ...
+  on_finish:
+    ...
+  retry:
+    ...
+{% endhighlight %}
+
+## Fields
+
+{: .table .table-bordered .table-hover}
+| Field | Description | Required/Optional/Default |
+| ------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
+| `title` | The free-text display name of the step. | Optional |
+| `description` | A basic, free-text description of the step. | Optional |
+| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional |
+| `working_directory` | The directory to which the repository is cloned. It can be an explicit path in the container's file system, or a variable that references another step. The default value is {% raw %}`${{main_clone}}`{% endraw %}, but note that the default will only be used if you name your step `main_clone`. See the example on [working inside the cloned directory]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout/#working-inside-the-cloned-directory) for more information. | Default |
+| `git` | The name of the [git integration]({{site.baseurl}}/docs/integrations/git-providers/) you want to use. If left empty, Codefresh will attempt to use the git provider that was used during account sign-up. Note that this might have unexpected results if you are changing your Git integrations. | Required |
+| `repo` | Path of the repository without the domain name, in the form of `my_username/my_repo`. | Required |
+| `revision` | The revision of the repository you are checking out. It can be a revision hash or a branch name. The default value is the branch you have specified in your Git provider (e.g. `master` or `main`). | Default |
+| `use_proxy` | If set to true, the Git clone process will honor `HTTP_PROXY` and `HTTPS_PROXY` variables if present for [working via a proxy](#using-git-behind-a-proxy). Default value is `false`. | Default |
+| `credentials` | Credentials to access the repository, if it requires authentication. It can be an object containing `username` and `password` fields. Credentials are optional if you are using the [built-in git integrations]({{site.baseurl}}/docs/integrations/git-providers/). | Optional |
+| `fail_fast` | If a step fails and the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions that need to be satisfied in order to execute this step.
You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/codefresh-yaml/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +**Exported resources:** +- Working Directory + +{{site.data.callout.callout_info}} +If you want to extend the git-clone step you can use the freestyle step. Example how to do it you can find [here]({{site.baseurl}}/docs/yaml-examples/examples/git-clone-private-repository-using-freestyle-step/) +{{site.data.callout.end}} + +## Basic clone step (project-based pipeline) + +The easiest way to use a git clone step is to use your default git provider as configured in [built-in git integrations]({{site.baseurl}}/docs/integrations/git-providers/). + +Here is an example of a pipeline that will automatically check out the repository that triggered it (i.e. a commit happened on that repository). + +>Notice that the name of the clone step is `main_clone`. This will automatically set the working directory of all other steps that follow it **inside** the folder of the project that was checked out. This only applies to [built-in]({{site.baseurl}}/docs/pipelines/steps/#built-in-steps) Codefresh steps and not [custom plugins]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin). This is normally what you want for a pipeline that only checks out a single project. If you use any other name apart from `main_clone` the working directory for all subsequent steps will not be affected and it will default on the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) which is the [parent folder]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) of checkouts. + + + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: my-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +The CF values will be automatically filled by Codefresh from the git trigger. See the [variables page]({{site.baseurl}}/docs/pipelines/variables/) for more details. + +## Choosing a specific git provider (project-based pipeline) + +If you don't want to use the default git provider you can explicitly set the provider by using the same name of the integration as it is shown in [the git integrations page]({{site.baseurl}}/docs/integrations/git-providers/). + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/example-git-providers.png" +url="/images/pipeline/codefresh-yaml/steps/example-git-providers.png" +alt="Example git integrations" +caption="Example git integrations" +max-width="40%" +%} + +Here is an example for an integration with the GitLab provider already connected: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' 
+ type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: '${{CF_REVISION}}' + git: my-gitlab + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +## Checkout a specific repository/revision (project based pipeline) + +If you want to check out a specific git repository regardless of what repository actually created the trigger +you can just define all values in a non-static manner. For example, if you want your pipeline to always checkout git repository `foo` even when the trigger happened from repository `bar` you can define the checkout step as below: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: 'my-github-username/foo' + revision: '${{CF_REVISION}}' + git: my-github-integration + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +In a similar manner you can also define that the pipeline will always checkout master, regardless of the commit that actually triggered it. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + revision: 'master' + git: my-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +## Checkout code using the Codefresh Runner + +If you are using the [Codefresh runner]({{site.baseurl}}/docs/installation/codefresh-runner/), you need to use +the fully qualified path of the Git repository: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: 'Cloning main repository...' + type: git-clone + repo: https://github-internal.example.com/my-username/my-app + revision: '${{CF_REVISION}}' + git: my-internal-git-provider + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + +More details can be found in the [private Git instructions page]({{site.baseurl}}/docs/reference/behind-the-firewall/#checking-out-code-from-a-private-git-repository). + + +## Checking out multiple Git repositories + +It is very easy to checkout additional repositories in a single pipeline by adding more `git-clone` steps. +In that case you should use different names for the steps (instead of `main_clone`) as this will make the working +folder for all steps the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + my_first_checkout: + title: 'Cloning first repository...' + type: git-clone + repo: 'my-gitlab-username/foo' + revision: '${{CF_REVISION}}' + git: my-gitlab-integration + my_second_checkout: + title: 'Cloning second repository...' 
+ type: git-clone + repo: 'my-github-username/bar' + revision: '${{CF_REVISION}}' + git: my-github-integration + PrintFileList: + title: 'Listing files' + image: alpine:latest + commands: + - 'ls -l' +{% endraw %} +{% endhighlight %} + + +## Skip or customize default clone (repository-based pipeline) + +If you have existing pipelines connected to repositories (only for Codefresh accounts created before May 2019) +a git clone step is transparently added to git attached pipelines without you having to explicitly add a step into the pipeline. This is a convenience to enable easy CI pipelines. +If you do not require git cloning, or you would like to customize the implicit git cloning behavior, you can choose to skip the automatically added git clone step. + +There are 2 ways to do that: + +1. Add a pipeline environment variable called `CF_SKIP_MAIN_CLONE` with value of `true`. + +-or- + +2. Add a step with key `main_clone` to your pipeline. This step can be of any type and can do any action. This step will override the default clone implementation. for example: + +```yaml +version: '1.0' +steps: + main_clone: + title: Checking out code + image: alpine/git:latest + commands: + - git clone ... + another_step: + ... +``` + +## Reuse a Git token from Codefresh integrations + +You also have the capability to use one of your existing [git integrations]({{site.baseurl}}/docs/integrations/git-providers/) +as an authentication mechanism. + +The [Codefresh CLI](https://codefresh-io.github.io/cli/) can read one of the connected [git authentication contexts](https://codefresh-io.github.io/cli/contexts/get-context/) and use that token for a custom clone step. + +Here is an example for GitHub + + +```yaml +version: '1.0' +steps: + get_git_token: + title: Reading GitHub token + image: codefresh/cli + commands: + - cf_export GITHUB_TOKEN=$(codefresh get context github --decrypt -o yaml | yq -r .spec.data.auth.password) + main_clone: + title: Checking out code + image: alpine/git:latest + commands: + - git clone https://my-github-username:$GITHUB_TOKEN@github.com/my-github-username/my-repo.git + another_step: + ... +``` + +## Working with GIT submodules + +To checkout a git project including its submodules you can use the [Codefresh submodule plugin](https://github.com/codefresh-io/plugins/tree/master/plugins/gitsubmodules). This plugin is already offered as a public docker image at [Dockerhub](https://hub.docker.com/r/codefresh/cfstep-gitsubmodules/tags). + +To use this module in your pipeline, add a new step like the one shown below. + +```yaml +version: '1.0' +steps: + updateSubmodules: + image: codefresh/cfstep-gitsubmodules + environment: + - GITHUB_TOKEN= + - CF_SUBMODULE_SYNC= + - CF_SUBMODULE_UPDATE_RECURSIVE= +``` + +The GitHub token can be either defined in the pipeline on its own as an environment variable, or fetched from +the existing [GIT integration]({{site.baseurl}}/docs/integrations/git-providers/) as shown in the previous section. 
+ +Here is full pipeline example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: + - checkout + - prepare + - build +steps: + clone: + title: Cloning the repository + type: git-clone + stage: checkout + arguments: + repo: '${{CF_REPO_OWNER}}/${{CF_REPO_NAME}}' + git: github + revision: '${{CF_REVISION}}' + + updateSubmodules: + image: codefresh/cfstep-gitsubmodules + stage: prepare + working_directory: '${{clone}}' + environment: + - GITHUB_TOKEN=${{MY_GITHUB_TOKEN}} + docker_build: + title: Building docker image + type: build + stage: build + working_directory: '${{clone}}/k8s/docker' + tag: current + disable_push: true + image_name: 'my-docker-image' + +{% endraw %} +{% endhighlight %} + +This pipeline does the following: + +1. Clones the main source code +1. Updates submodules +1. Creates a docker image + + +## Use an SSH key with Git + +It is also possible to use an SSH key with git. When [creating your pipeline]({{site.baseurl}}/docs/pipelines/pipelines/) add your SSH key as an encrypted +environment variable after processing it with `tr`: + +``` +cat ~/.ssh/my_ssh_key_file | tr '\n' ',' +``` + + +Then in the pipeline use it like this: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + title: Checking out code + image: alpine/git:latest + commands: + - mkdir -p ~/.ssh + - echo "${SSH_KEY}" | tr \'"${SPLIT_CHAR}"\' '\n' > ~/.ssh/id_rsa + - chmod 600 ~/.ssh/id_rsa + - git clone git@github.com:my-github-username/my-repo.git + # can also use go get or other similar command that uses git internally + another_step: + ... +{% endraw %} +{% endhighlight %} + +## Using Git behind a proxy + +If you use the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) and need to use a network proxy in your clone step you need to set the [variables]({{site.baseurl}}/docs/pipelines/variables/) `HTTP_PROXY` and/or `HTTPS_PROXY` in the pipeline +and then activate the property `use_proxy: true` in the clone step. Example: + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + clone: + title: "Cloning repository" + type: "git-clone" + repo: "https://github.com/my-github-user/my-repo/" + revision: "master" + use_proxy: true + git: my-git-provider +{% endraw %} +{% endhighlight %} + +For setting the values of the proxy variables you can use any of the supported methods for defining variables such as [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/). + + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/steps/proxy-variables.png" +url="/images/pipeline/codefresh-yaml/steps/proxy-variables.png" +alt="Pipeline variable" +caption="Pipeline variable" +max-width="40%" +%} + +For more details see the [behind the firewall page]({{site.baseurl}}/docs/installation/behind-the-firewall/). 
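+
+If you prefer to keep the proxy settings inside the pipeline definition itself, one possible approach is to export them with [`cf_export`]({{site.baseurl}}/docs/pipelines/variables/#using-cf_export-command) from a small freestyle step that runs before the clone step. The sketch below makes some assumptions: the step name `set_proxy_variables` and the address `http://my-proxy.example.com:8080` are placeholders for your own values, and it relies on variables exported in a previous step being visible to the clone step like any other pipeline variable.
+
+`codefresh.yml`
+{% highlight yaml %}
+{% raw %}
+version: "1.0"
+steps:
+  set_proxy_variables:
+    title: "Setting proxy variables"
+    image: alpine
+    commands:
+      # Placeholder proxy address - replace with the proxy that is reachable from your runner
+      - cf_export HTTP_PROXY=http://my-proxy.example.com:8080
+      - cf_export HTTPS_PROXY=http://my-proxy.example.com:8080
+  clone:
+    title: "Cloning repository"
+    type: "git-clone"
+    repo: "https://github.com/my-github-user/my-repo/"
+    revision: "master"
+    use_proxy: true
+    git: my-git-provider
+{% endraw %}
+{% endhighlight %}
+
+Defining the variables at the pipeline level, as shown in the screenshot above, remains the simplest option; the inline variant is mainly useful when the proxy address itself is computed during the build.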
+ + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Git integrations]({{site.baseurl}}/docs/integrations/git-providers/) +[YAML steps]({{site.baseurl}}/docs/pipelines/steps/) +[Git Checkout Examples]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout/) +[Custom Git Commands]({{site.baseurl}}/docs/yaml-examples/examples/git-checkout-custom/) + + + + + + diff --git a/_docs/pipelines/steps/launch-composition.md b/_docs/pipelines/steps/launch-composition.md new file mode 100644 index 00000000..a3bce2d5 --- /dev/null +++ b/_docs/pipelines/steps/launch-composition.md @@ -0,0 +1,92 @@ +--- +title: "Launch-Composition" +description: "Create a test environment with its dependencies in Codefresh infrastructure" +group: pipelines +sub_group: steps +redirect_from: + - /docs/launch-composition-2/ + - /docs/codefresh-yaml/steps/launch-composition-2/ +toc: true +--- +The Launch Composition step provides the ability to launch long term running environments that can live outside the context of a running pipeline. +You can use this step to automate your test environment creation through a codefresh.yml file instead of manually launching an environment from the UI. + +>Note that "launch-composition" creates a permanent test environment that keeps running even after a pipeline has finished. If you just want temporary test environments that run *only while* a pipeline is running, see [service containers]({{site.baseurl}}/docs/pipelines/service-containers/) and the documentation page for [integration tests]({{site.baseurl}}/docs/testing/integration-tests/). + +## Usage + + `ui defined composition` +{% highlight yaml %} +step_name: + title: Step Title + type: launch-composition + composition: 'ui_defined_composition_name' + environment_name: 'environment name' + on_success: + ... + on_fail: + ... + on_finish: + ... +{% endhighlight %} + + `inline composition` +{% highlight yaml %} +step_name: + type: launch-composition + composition: + version: '2' + services: + app: + image: owner/app:latest + db: + image: mongo + environment_name: 'environment name' + on_success: + ... + on_fail: + ... + on_finish: + ... + retry: + ... +{% endhighlight %} + + `from file composition` +{% highlight yaml %} +step_name: + type: launch-composition + working_directory: ${{a_clone_step}} + composition: './path/to/docker-compose.yaml' + environment_name: 'environment name' + on_success: + ... + on_fail: + ... + on_finish: + ... +{% endhighlight %} + +## Fields + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional | +| `working_directory` | The directory in which to search for the composition file. It can be an explicit path in the container's file system, or a variable that references another step.
The default is {% raw %}`${{main_clone}}`{% endraw %}. | Default |
+| `composition` | The composition you want to run. It can be an inline YAML definition, a path to a composition file on the file system, or the logical name of a composition stored in the Codefresh system. | Required |
+| `environment_name` | The environment name that will be given. In case a previous environment exists with the same name, it will first be terminated. The default value will be the name/path provided in the `composition` field. | Default |
+| `composition_variables` | A set of environment variables to substitute in the composition. | Optional |
+| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
You can find more information in the [Conditional Execution of Steps]({{ site.baseurl }}/docs/pipelines/conditional-execution-of-steps/) article. | Optional |
+| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{ site.baseurl }}/docs/pipelines/post-step-operations/). | Optional |
+| `entry_point` | The name of the main service. | Optional |
+| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/#retrying-a-step). | Optional |
+
+## Related articles
+[Preview environments]({{site.baseurl}}/docs/getting-started/on-demand-environments/)
+[Launch Composition example]({{site.baseurl}}/docs/yaml-examples/examples/launch-composition/)
+[Integration tests]({{site.baseurl}}/docs/testing/integration-tests/)
+[Service Containers]({{site.baseurl}}/docs/pipelines/service-containers/)
\ No newline at end of file
diff --git a/_docs/pipelines/steps/push.md b/_docs/pipelines/steps/push.md
new file mode 100644
index 00000000..699f0200
--- /dev/null
+++ b/_docs/pipelines/steps/push.md
@@ -0,0 +1,257 @@
+---
+title: "Push step"
+description: "Pushing Docker images from your pipeline"
+group: pipelines
+sub_group: steps
+redirect_from:
+  - /docs/push-1/
+  - /docs/codefresh-yaml/steps/push-1/
+toc: true
+---
+
+{{site.data.callout.callout_info}}
+
+If you use only the default Docker registry of your account, this step is optional, as all successful Codefresh pipelines automatically push the Docker image they create to the default Docker registry. No further configuration is needed to achieve this behavior.
+{{site.data.callout.end}}
+
+Push a built image to a remote Docker registry with one or more tags. Supports standard Docker registries and ECR.
+
+Notice that when you use [any external registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/), you need to comply with the naming pattern used by that registry, otherwise the push step will fail. For example, if your Codefresh image is tagged as `foo_username/my_image` but your Dockerhub account is `bar_username` then the push will fail and you need to customize the push step to use `bar_username` instead. This is a limitation of external registries such as Dockerhub.
+
+## Usage
+
+  `YAML`
+{% highlight yaml %}
+step_name:
+  type: push
+  title: Step Title
+  description: Free text description
+  candidate: {% raw %}${{build_step_name}}{% endraw %}
+  tag: latest
+  image_name: codefresh/app
+  registry: my-registry
+  fail_fast: false
+  when:
+    branch:
+      only:
+        - /FB-/i
+  on_success:
+    ...
+  on_fail:
+    ...
+  on_finish:
+    ...
+  retry:
+    ...
+
+{% endhighlight %}
+
+## Fields
+
+{: .table .table-bordered .table-hover}
+| Field | Description | Required/Optional/Default |
+| ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- |
+| `title` | The free-text display name of the step. | Optional |
+| `description` | A basic, free-text description of the step. | Optional |
+| `stage` | Parent group of this step. See [using stages]({{site.baseurl}}/docs/pipelines/stages/) for more information. | Optional |
+| `candidate` | The identifier of the image to push to the remote Docker registry.
It can be an explicit identifier of an image to push, or a variable that references a `Build` step. | Required | +| `tag` | The tag under which to push the image. Use either this or `tags`.
                                                        The default is `latest`. | Default | +| `region` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The names of the regions for which to perform cross-region replication. The names of the source region and the destination region name must be defined in separate steps. | Optional | +| `role_arn` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The role with the required permissions to use to pull the image. For example, `arn:aws:iam:::role/` | Required | +| `aws_session_name` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The name of the AWS session. If not defined, `default-session-name` is used. | Default | +| `aws_duration_seconds` | Relevant only for [Amazon ECR]({{site.baseurl}}/docs/integrations/docker-registries/amazon-ec2-container-registry/) integrations using either service accounts or explicit credentials. The length of time, in seconds, for which the role credentials are considered valid, and must be between `900-3600` seconds. If not defined, the duration is set to the default of `3600` seconds. | Default | +| `tags` | Multiple tags under which to push the image. Use either this or `tag`. This is an array, so should be of the following style:
                                                        {::nomarkdown}
                                                        tags:
                                                        - tag1
                                                        - tag2
                                                        - {% raw %}${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}{% endraw %}
                                                        - tag4
                                                        {:/}or
                                                        {::nomarkdown}
                                                        tags: [ 'tag1', 'tag2', '{% raw %}${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}{% endraw %}', 'tag4' ]
{:/} | Default |
+| `image_name` | The tagged image name that will be used. The default value will be the same image name as of the candidate. | Default |
+| `registry` | The registry logical name of one of the inserted registries from the integration view.
                                                        The default value will be your default registry [if you have more than one]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). | Default | +| `registry_context` | Advanced property for resolving Docker images when [working with multiple registries with the same domain]({{site.baseurl}}/docs/docker-registries/working-with-docker-registries/#working-with-multiple-registries-with-the-same-domain) | Optional | +| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default | +| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
                                                        You can find more information in the [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) article. | Optional | +| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional | +| `retry` | Define retry behavior as described in [Retrying a step]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/#retrying-a-step). | Optional | + +## Examples + +Push an image to a registry connected with the [integration name]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) of `myazureregistry`. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + pushToMyRegistry: + stage: 'my push phase' + type: push + title: Pushing to a registry + candidate: ${{MyAppDockerImage}} + tag: ${{CF_SHORT_REVISION}} + registry: myazureregistry +{% endraw %} +{% endhighlight %} + +Push an image as the name of the branch in the [external registry]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) and also use a different image than the default. The same image will also by pushed as `latest` in the internal Codefresh registry (with the default name of `my-app-image`). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + tag: latest + pushToMyRegistry: + stage: 'my push phase' + type: push + title: Pushing to a registry + candidate: ${{MyAppDockerImage}} + tag: ${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}} + registry: myazureregistry + image_name: my-user-name/a-different-image-name +{% endraw %} +{% endhighlight %} + + +Push an image with multiple tags. + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + pushToMyRegistry: + stage: 'my push phase' + type: push + title: Pushing to a registry + candidate: ${{MyAppDockerImage}} + tags: + - ${{CF_SHORT_REVISION}} + - latest + - 2.0.0 + registry: myazureregistry +{% endraw %} +{% endhighlight %} + +Push an image with multiple tags to multiple Docker registries in [parallel]({{site.baseurl}}/docs/pipelines/advanced-workflows/). +Both registries are connected first in the [integrations page]({{site.baseurl}}/docs/docker-registries/external-docker-registries/). 
+ + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +stages: +- 'my build phase' +- 'my push phase' +steps: + MyAppDockerImage: + title: Building Docker Image + stage: 'my build phase' + type: build + image_name: my-app-image + dockerfile: Dockerfile + PushingToRegistries: + type: parallel + stage: 'push' + steps: + PushingToGoogleRegistry: + type: push + title: Pushing To Google Registry + candidate: ${{MyAppDockerImage}} + tags: + - ${{CF_BUILD_ID}} + - latest + - production + registry: gcr + PushingToDockerRegistry: + type: push + title: Pushing To Dockerhub Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_SHORT_REVISION}}' + image_name: my-docker-hub-username/my-app-name + registry: dockerhub +{% endraw %} +{% endhighlight %} + + +## Using passed credentials without pre-saving them + +This option enables you to push your images without pre-saving the credentials in Codefresh's registry integration view. + +>Note that this method of pushing images is offered as a workaround. The suggested way is to use the [central Codefresh integration for registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) as explained in the previous section. + + `YAML` +{% highlight yaml %} +step_name: + type: push + title: Step Title + description: Free text description + candidate: {% raw %}${{build_step_name}}{% endraw %} + tags: [ latest, {% raw %}${{CF_BRANCH}}{% endraw %} ] + image_name: codefresh/app + registry: dtr.host.com + credentials: + username: subject + password: credentials + fail_fast: false + when: + branch: + only: + - /FB-/i + on_success: + ... + on_fail: + ... + on_finish: + ... +{% endhighlight %} + +{: .table .table-bordered .table-hover} +| Field | Description | Required/Optional/Default | +| ---------------------------- | ------------------------------------ | ----------------------------------------------- | +| `title` | The free-text display name of the step. | Optional | +| `description` | A basic, free-text description of the step. | Optional | +| `provider` | The type of Docker registry provider. Can currently be either `docker` for a standard Docker registry, or `ecr` for the [Amazon EC2 Container Registry (ECR)](https://aws.amazon.com/ecr/). | Optional
                                                        *Default value*: `docker` | +| `candidate` | The identifier of the image to push to the remote Docker registry. It can be an explicit identifier of an image to push, or a variable that references a `Build` step. | Required | +| `tag` | The tag under which to push the image. Use either this or `tags`.
                                                        The default is `latest`. | Default | +| `tags` | Multiple tags under which to push the image. Use either this or 'tag'.
                                                        This is an array, so should be of the following style:
                                                        {::nomarkdown}
                                                        tags:
                                                        - tag1
                                                        - tag2
                                                        - {% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}
                                                        - tag4
                                                        {:/}or
                                                        {::nomarkdown}
                                                        tags: [ 'tag1', 'tag2', '{% raw %}${{CF_BRANCH_TAG_NORMALIZED}}{% endraw %}', 'tag4' ]
                                                        {:/} | Default | +| `image_name` | The tagged image name that will be used. The default value will be the same image name as of the candidate. | Default | +| `registry` | The host address where the registry is located. The default is the registry configured in your Codefresh account, or Dockerhub. | Default
**Ignored when provider is** `ecr` |
+| `credentials` | Credentials to access the registry if it requires authentication. It can be a hash object containing `username` and `password` fields. The default is the credentials configured in your Codefresh account. | Optional
                                                        **Ignored when provider is** `ecr` | +| `accessKeyId` | Your AWS access key. | Optional
                                                        **Ignored when provider is** `docker` | +| `secretAccessKey` | Your AWS secret access key. | Optional
                                                        **Ignored when provider is** `docker` | +| `region` | The region where the ECR registry is accessible. | Optional
**Ignored when provider is** `docker` |
+| `fail_fast` | If a step fails, and the process is halted. The default value is `true`. | Default |
+| `when` | Define a set of conditions which need to be satisfied in order to execute this step.
You can find more information in [Conditional Execution of Steps]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/). | Optional |
+| `on_success`, `on_fail` and `on_finish` | Define operations to perform upon step completion using a set of predefined [Post-Step Operations]({{site.baseurl}}/docs/pipelines/post-step-operations/). | Optional |
+
+**Exported resources:**
+- Image ID.
+
+## Related articles
+[External Registry integrations]({{site.baseurl}}/docs/docker-registries/external-docker-registries/)
+[Custom Image annotations]({{site.baseurl}}/docs/docker-registries/metadata-annotations/)
+[Pipeline steps]({{site.baseurl}}/docs/pipelines/steps/)
\ No newline at end of file
diff --git a/_docs/pipelines/triggers.md b/_docs/pipelines/triggers.md
new file mode 100644
index 00000000..621d7792
--- /dev/null
+++ b/_docs/pipelines/triggers.md
@@ -0,0 +1,114 @@
+---
+title: "Triggers in CI pipelines"
+description: "Choose when your pipelines should run"
+group: pipelines
+redirect_from:
+  - /docs/pipeline-triggers/
+  - /docs/pipeline-triggers/introduction-triggers/
+toc: true
+---
+
+
+To create an effective CI/CD process, it should be possible to trigger a Codefresh pipeline execution not only on code repository events (like `push` or `PR`), but also on any "interesting" CD-related event, coming from some external system.
+
+Codefresh not only allows you to define different pipelines on a single project but it also offers you the capability to trigger them with completely separate mechanisms.
+
+
+## Pipeline trigger types
+
+The following types of triggers are currently supported for pipelines:
+
+* [Git triggers](git-triggers)
+* [Dockerhub triggers](dockerhub-triggers)
+* [Azure Registry triggers](azure-triggers)
+* [Quay triggers](quay-triggers)
+* [Helm triggers](helm-triggers)
+* [Artifactory triggers](jfrog-triggers)
+* [Cron trigger](cron-triggers)
+* [API/CLI trigger]({{site.baseurl}}/docs/integrations/codefresh-api/)
+
+As an example, this project contains four pipelines:
+
+{% include image.html
+lightbox="true"
+file="/images/pipeline/triggers/pipeline-examples.png"
+url="/images/pipeline/triggers/pipeline-examples.png"
+alt="Sample pipelines"
+caption="Sample pipelines"
+max-width="70%"
+%}
+
+Behind the scenes these pipelines are triggered from different events:
+
+* Pipeline "CI-build" uses a GIT trigger and starts after every commit to the code repository
+* Pipeline "Sonarcloud" is executed every weekend using a cron (timed) trigger
+* Pipeline "integration-test" is executed whenever a commit happens in a Pull request on the code
+* Pipeline "deploy-prod-k8s" is executed whenever a Docker image is pushed to the Docker registry
+
+This is just an example. You are free to create your own triggers that match your own internal process.
+It is also possible to add multiple triggers for a pipeline so that it is executed for more than one type of event.
+
+If a pipeline has no defined trigger you can still start it manually.
+
+For all trigger types you can also use the [Codefresh CLI](https://codefresh-io.github.io/cli/triggers/) to manage them.
+
+
+
+## Creating a new trigger for a pipeline
+
+By default, when you create a new project from a Git provider, it will start with a Git trigger that runs on every commit.
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/default-git-trigger.png" +url="/images/pipeline/triggers/default-git-trigger.png" +alt="Default GIT Trigger" +caption="Default GIT Trigger" +max-width="50%" +%} + +You can either delete this trigger, modify it, or add new ones. + +To add a new trigger, go to the *Triggers* tab in your pipeline editor and click the *Add Trigger* button. This will bring up the respective dialog where you are adding a new trigger. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +caption="Adding new Trigger dialog" +max-width="70%" +%} + +For more information see: + +* [Git triggers](git-triggers) +* [Dockerhub triggers](dockerhub-triggers) +* [Azure Registry triggers](azure-triggers) +* [Quay triggers](quay-triggers) +* [Helm triggers](helm-triggers) +* [Artifactory triggers](jfrog-triggers) +* [Cron trigger](cron-triggers) + +## Disabling triggers + +You can easily disable a trigger manually if you don't want it to be active anymore. +On the triggers tab, click the gear icon on the top right (*Open advanced options*). + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/enable-triggers.png" +url="/images/pipeline/triggers/enable-triggers.png" +alt="Toggle a trigger on/off" +caption="Toggle a trigger on/off" +max-width="70%" +%} + + +Then click the toggle switch on each trigger that you want to enable/disable. You can later enable the same trigger again +by clicking the same switch. + +## Related articles +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Running pipelines locally]({{site.baseurl}}/docs/pipelines/running-pipelines-locally/) +[Trigger a Kubernetes Deployment from a Dockerhub Push Event]({{site.baseurl}}/docs//yaml-examples/examples/trigger-a-k8s-deployment-from-docker-registry/) diff --git a/_docs/pipelines/triggers/azure-triggers.md b/_docs/pipelines/triggers/azure-triggers.md new file mode 100644 index 00000000..5356be27 --- /dev/null +++ b/_docs/pipelines/triggers/azure-triggers.md @@ -0,0 +1,88 @@ +--- +title: "Azure Registry trigger" +description: "Trigger Codefresh pipelines from Azure Registry events" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/pipeline-triggers/configure-azure-trigger/ +toc: true +--- + +Define and manage Azure Registry triggers for pipelines with the Codefresh UI. + +This allows you to trigger Codefresh pipelines when an Azure Registry event happens (e.g. a new Docker image is uploaded). + +## Manage Azure triggers with Codefresh UI + + +The process involves two parts: + +1. Creating a trigger in Codefresh. This will result in a special Codefresh webhook URL. +1. Creating a new notification in the Azure Registry that will use this URL to call Codefresh. + +> Make sure that you have an Azure cloud account and have already [created a registry](https://docs.microsoft.com/en-us/azure/container-registry/). + + +### Create a new Azure trigger + +To add a new Azure trigger, navigate to a Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="40%" +%} + +Fill the following information: + +* Registry Provider - select `Azure`. +* *Name of Registry* - put Azure name of registry (without `.azurecr.io`). +* *Image Repository Name* - Azure image repository name. +* *Action* - select `Push Image` action. +* *Tags* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/azure/add-trigger-dialog.png" +url="/images/pipeline/triggers/azure/add-trigger-dialog.png" +alt="Azure Registry settings" +max-width="50%" +%} + +Click next and a new dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/azure/view-trigger-dialog.png" +url="/images/pipeline/triggers/azure/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set Azure to call this URL when an event takes place. + +### Set up Azure notification + +The easiest way to create an Azure trigger is with the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/acr/webhook?view=azure-cli-latest#az-acr-webhook-create) (Also available in the Azure portal) + +Here is the command: + +{% highlight shell %} +{% raw %} +az acr webhook create -n MyWebhook -r kostisregistry --uri "https://g.codefresh.io/nomios/azure?account=409f15bdd444&secret=7zyg5Zhb8xYBn4ms" --actions push delete +{% endraw %} +{% endhighlight %} + +The name can be anything you want. The URI is the Codefresh URL that was created in the previous step. + + +### Triggering a Codefresh pipeline with Azure push + +Now, every time you push a new Docker image to the selected Azure Docker repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Azure Push trigger event. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/pipelines/triggers/cron-triggers.md b/_docs/pipelines/triggers/cron-triggers.md new file mode 100644 index 00000000..93838253 --- /dev/null +++ b/_docs/pipelines/triggers/cron-triggers.md @@ -0,0 +1,104 @@ +--- +title: "Cron Trigger" +description: "Run pipelines with a time schedule" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/configure-cron-trigger/ + - /docs/pipeline-triggers/configure-cron-trigger/ +toc: true +--- + +Cron triggers allow you to create pipelines that start on a specific time schedule. This is very useful for cleanup jobs or periodic checks or any other workflow that needs to run after a time interval. + +>All times mentioned in Cron triggers use the UTC time zone. + +## Manage Cron Triggers with Codefresh UI + +It is possible to define and manage Cron-based pipeline triggers with Codefresh UI. + +### Create a new Cron Trigger + +To add a new Cron trigger, navigate to Codefresh Pipeline *Configuration* view and expand *Triggers* section. Press the `Add Trigger` button and select a `Cron` trigger type to add. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + + +Visit [this page](https://github.com/codefresh-io/cronus/blob/master/docs/expression.md) to learn about the supported `cron` expression format and aliases. + + +Fill the following information: + +* Use the Cron helper wizard to build a valid `cron` expression or write a custom `cron` expression on the last tab. +* Add a free text message that will be sent as an additional event payload every time `cron` is executed. + +{% include image.html +lightbox="true" +file="/images/cron_trigger.png" +url="/images/cron_trigger.png" +alt="Add Cron Trigger" +max-width="70%" +%} + + +### Trigger Codefresh pipeline with cron timer + +Now, `cron` will trigger a recurrent pipeline execution based on the defined `cron expression`. + +## Manage Cron Triggers with Codefresh CLI + +It is also possible to use the Codefresh Command Line client (`CLI`) to manage Cron-based pipeline triggers. + +### Cron trigger + +It is possible to trigger Codefresh CD pipeline(s) periodically, using a `cron` expression. + +You can use [Codefresh CLI](https://cli.codefresh.io/) to set up a Codefresh `cron` trigger. + +#### Create Cron trigger-event + +First, you need to create a new `cron` `trigger-event` to define a recurrent event. + +```sh +# create cron recurrent event 'once in 20 minutes' +codefresh create trigger-event --type cron --kind codefresh --value expression="0 */20 * * * *" --value message="hello-once-in-20-min" + +# on success trigger-event UID will be printed out +Trigger event: "cron:codefresh:codefresh:0 */20 * * * *:hello-once-in-20-min:107e9db97062" was successfully created. +``` + +When creating a `cron trigger-event`, it is possible to specify a short text message that will be passed to linked pipelines every time the specified `cron` timer is triggered. + +Visit [this page](https://github.com/codefresh-io/cronus/blob/master/docs/expression.md) to learn about the supported `cron` expression format and aliases. + +#### Set up pipeline trigger + +Now, let's create a new pipeline trigger, linking previously defined `cron` `trigger-event` to one or more Codefresh pipelines. 
+ +```sh +# create trigger, linking trigger-event UID to the pipeline UID +codefresh create trigger "cron:codefresh:codefresh:0 */20 * * * *:hello-once-in-20-min:107e9db97062" 7a5622e4b1ad5ba0018a3c9c + +# create another trigger, linking the same trigger-event to another pipeline +codefresh create trigger "cron:codefresh:codefresh:0 */20 * * * *:hello-once-in-20-min:107e9db97062" 4a5634e4b2cd6baf021a3c0a +``` + +From now on, every 20 minutes Codefresh will trigger a pipeline execution for 2 pipelines linked to the previously specified `cron` `trigger-event` (once in 20 minutes) + +#### Cron Event payload + +The following variables will be available for any Codefresh pipeline linked to a `cron` `trigger-event`: + +- `EVENT_MESSAGE` - free text message (specified during creation) +- `EVENT_TIMESTAMP` - event timestamp in RFC 3339 format + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) + diff --git a/_docs/pipelines/triggers/dockerhub-triggers.md b/_docs/pipelines/triggers/dockerhub-triggers.md new file mode 100644 index 00000000..1268e4d8 --- /dev/null +++ b/_docs/pipelines/triggers/dockerhub-triggers.md @@ -0,0 +1,152 @@ +--- +title: "DockerHub triggers" +description: "" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/configure-dockerhub-trigger/ + - /docs/pipeline-triggers/configure-dockerhub-trigger/ +toc: true +--- + + +You can define and manage DockerHub triggers in Codefresh. + +### Create a new DockerHub trigger in Codefresh UI + +To add a new DockerHub trigger, navigate to Codefresh Pipeline *Configuration* view and expand *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + +Fill the following information: + +* *Registry Provider* - select `DockerHub`. +* *User/Organization Name* - put DockerHub user name or organization name here. +* *Image Repository Name* - DockerHub image repository name. +* *Action* - select `Push Image` action. +* *Tag* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/dockerhub/dockerhub_trigger_1.png" +url="/images/pipeline/triggers/dockerhub/dockerhub_trigger_1.png" +alt="Add Registry Trigger" +max-width="70%" +%} + +### Setup DockerHub Webhook + +Currently Codefresh does not support automatically setting up a DockerHub webhook. You need to do this manually. Press the *Next* button and see detailed instructions with URL links and secrets of how-to setup a DockerHub Webhook. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/dockerhub/dockerhub_trigger_2.png" +url="/images/pipeline/triggers/dockerhub/dockerhub_trigger_2.png" +alt="Add Webhook" +max-width="70%" +%} + +1. Copy `Endpoint` URL +1. Visit DockerHub image settings page following link in help +1. 
Add a new DockerHub Webhook with previously copied `Endpoint` URL + +### Triggering Codefresh pipeline with DockerHub push + +Now, every time you push a new Docker image to selected DockerHub repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with this DockerHub Push trigger event. + +## Manage DockerHub triggers with Codefresh CLI + +It is possible to use `codefresh` command line client (`CLI`) to manage DockerHub pipeline triggers. + +### Docker Hub Trigger + +It is possible to trigger Codefresh CD pipeline(s) when a new Docker image pushed into DockerHub. + +You can use [Codefresh CLI](https://cli.codefresh.io/) to setup a Codefresh trigger for DockerHub. + +#### Create DockerHub trigger-event + +First, create a `trigger-event` for every DockerHub image, you would like to setup a Codefresh trigger. + +``` +# create DockerHub trigger event for codefresh/fortune +codefresh create trigger-event --type registry --kind dockerhub --value namespace=codefresh --value name=fortune --value action=push + +# on success trigger-event UID will be printed out +Trigger event: registry:dockerhub:codefresh:fortune:push:107e9db97062 was successfully created. +``` + +#### Set up DockerHub webhook + +Currently, an additional manual action is required to bind DockerHub `push` image event to the Codefresh `trigger-event`. + +``` +# get trigger-event details for previously created trigger-event +codefresh get trigger-event -o yaml registry:dockerhub:codefresh:fortune:push:107e9db97062 +``` + +... command output: + +```yaml +uri: 'registry:dockerhub:codefresh:fortune:push:107e9db97062' +type: registry +kind: dockerhub +public: false +secret: aGao5weuez2G6WF9 +status: active +endpoint: >- + https://g.codefresh.io/nomios/dockerhub?account=107e9db97062&secret=aGao5weuez2G6WF9 +description: Docker Hub codefresh/fortune push event +help: >- + Docker Hub webhooks fire when an image is built in, pushed or a new tag is + added to, your repository. + + + Configure Docker Hub webhooks on + https://hub.docker.com/r/codefresh/fortune/~/settings/webhooks/ + + + Add following Codefresh Docker Hub webhook endpoint + https://g.codefresh.io/nomios/dockerhub?account=107e9db97062&secret=aGao5weuez2G6WF9 +``` + +1. Copy `endpoint` URL +1. Visit DockerHub settings page [https://hub.docker.com/r/codefresh/fortune/~/settings/webhooks/](https://hub.docker.com/r/codefresh/fortune/~/settings/webhooks/). +1. Add a new Webhook with previously copied `endpoint` URL. + + +#### Set up pipeline trigger + +Now, lets set up a new pipeline trigger, linking previously defined DockerHub push `codefresh/fortune` `trigger-event` to one or more Codefresh pipelines. + +``` +# create trigger, linking trigger-event UID to the pipeline UID +codefresh create trigger "registry:dockerhub:codefresh:fortune:push:107e9db97062" 7a5622e4b1ad5ba0018a3c9c + +# create another trigger, linking the same trigger-event to another pipeline +codefresh create trigger "registry:dockerhub:codefresh:fortune:push:107e9db97062" 4a5634e4b2cd6baf021a3c0a +``` + +From now on, Codefresh will trigger pipeline execution when new `codefresh/fortune` image is pushed to the DockerHub. + +#### DockerHub Event payload + +The following variables will be available for any Codefresh pipeline linked to a DockerHub `trigger-event`: + +- `EVENT_NAMESPACE` - DockerHub namespace (alias `organization`). +- `EVENT_NAME` - DockerHub image name (alias `repository`). +- `EVENT_TAG` - Docker image tag. 
+- `EVENT_PUSHER` - user who pushed this Docker image. +- `EVENT_PUSHED_AT` - timestamp for push event. +- `EVENT_PAYLOAD` - original DockerHub Webhook JSON payload. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) diff --git a/_docs/pipelines/triggers/git-triggers.md b/_docs/pipelines/triggers/git-triggers.md new file mode 100644 index 00000000..de0a551e --- /dev/null +++ b/_docs/pipelines/triggers/git-triggers.md @@ -0,0 +1,371 @@ +--- +title: "Git triggers" +description: "Learn how to run pipelines from Git events" +group: pipelines +sub_group: triggers +toc: true +--- + +Git triggers are the most basic of the trigger types for performing [Continuous Integration](https://en.wikipedia.org/wiki/Continuous_integration) with Codefresh. + +At the trigger level, you can select: + +* Which code repository will be used as a trigger +* Which branches will be affected by a pipeline +* If a trigger will apply to a Pull Request (PR) or not + +> You can select a repository other than the one the project itself belongs to. It is possible + to trigger a build on project A even though a commit happened on project B. + +You can also use [conditional expressions]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) at the pipeline level to further fine-tune the way specific steps (or other transitive pipelines) are executed. + +## Manage GIT triggers with Codefresh UI + +To add a new GIT trigger, navigate to the Codefresh Pipeline *Configuration* view and expand the *Triggers* section on the right side. Press the *Add Trigger* button and select a *GIT* trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + +## General trigger Settings + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-git-trigger.png" +url="/images/pipeline/triggers/add-git-trigger.png" +alt="Adding GIT Trigger" +max-width="50%" +%} + +The Git trigger consists of the following settings: + +* *Trigger Name* - a freetext trigger name (required). +* *Description* - a freetext description (optional). +* *Repository* - you can select any repository, even one different from the one that is used for the code checkout. +* *Commit Checkbox* - if enabled, will trigger this pipeline for any commit. +* *PR Checkboxes* - various checkboxes for filtering the Pull request event. + +The commit checkbox (by default it is enabled) means that this pipeline will run for *any* commit as long as its source branch matches the naming scheme. This includes commits on pull requests. + +The PR checkboxes mean that this pipeline will run only on the respective events that happen on a Pull Request. You can select multiple checkboxes to further fine-tune the exact event. If you are interested in all events, select the checkbox *Any Pull Request event*. + +>The individual Pull request checkboxes are available only for GitHub repositories. + +## Configure Filter Settings + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/configure-filter-settings.png" +url="/images/pipeline/triggers/configure-filter-settings.png" +alt="Configure Filter Settings" +max-width="50%" +%} + +* *Support pull request events from forks* - toggle that is useful for open source projects. 
+* *Branch Field* - this is a regular expression and will only trigger for branches that match this naming pattern. +* *PR Comment (restricted) and PR Comment Fields* - useful for open source projects. +* *Pull Request Target* branch - this is a regular expression and will trigger only when a Pull request is created against any branch that matches it. +* *Modified Files* - allows you to constrain the build and trigger it only if the modified files from the commit match this [glob expression](https://en.wikipedia.org/wiki/Glob_(programming)). + +### Pull Request Target Branch and Branch + +The Pull Request Target Branch field allows you to trigger this pipeline only when the target of a Pull Request (i.e. where the PR is going to be merged) matches the +branch name regular expression. Common examples for branch names would be `master` or `production`. + +This field is only meaningful when a commit happens in the context of a pull request, and in that case: + +1. The Branch field will look at the branch that the commit is happening on +1. The PR Target Branch field will look at the branch the PR is happening against + +For example, if you create a commit on a branch that is named `my-feature` which is currently part of a PR against branch `staging` (i.e. somebody wants to merge `my-feature` **TO** `staging`) then: + +1. The `BRANCH` field value will try to match against `my-feature` +1. The `PULL REQUEST TARGET BRANCH` will try to match against `staging` + +Here are some more syntax examples: + +* `/^((qa-release)$).*/g` - only run if branch is named `qa-release`. +* `/^((production)$).*/g` - only run if branch is named `production`. +* `/release/g` - only run if branch name contains `release` as substring. +* `/feature-/gi` - only run if branch is `feature-foo`, `feature-bar`, `my-feature-123` etc. +* `/^((?!^feature).)*$/gi` - only run if branch name does **not** start with `feature`. + +>The field *Pull Request Target* is available for all Git providers apart from Atlassian Stash. +> +>When using the Terraform Provider, please use the [Go regex syntax](https://github.com/google/re2/wiki/Syntax) as some Perl regex syntax is not compatible. + +The concept behind these checkboxes and branch name fields is to allow you to define which pipelines run for various workflows in your organization. + +As a simple example, you can have a *production* pipeline that runs only on *master* branch (and therefore the branch field says "master") and a *testing* pipeline that runs user acceptance tests where only the Pull Request Open checkbox is active. This means that User Acceptance tests will run whenever a PR is created. Once it is merged, the *production* pipeline will deploy the changes. + +In a more advanced example, you could add regular expressions in the branch field with names such as *feature-*, *hotfix-* etc. and the PR checkbox active on different pipelines. This way you could trigger the pull requests only when they happen on specific branches. So, a developer that creates a temporary feature with a name that doesn't match these naming patterns will not trigger those pipelines. + +Notice also that you can use Negative Lookahead in your Branch (Regex Expression) filter. An example to exclude tag events: `/^((?!tag)).*/gi` (the pattern here for tags to exclude is that they begin with `tag...`). + +This excludes all push events (including tags) whose names follow the `tag...` pattern. +Therefore, tags like `tag1` and `tag-X` **won't** trigger the pipeline. 
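+
+The same kind of branch filtering can also be applied inside the pipeline itself with step conditions. The snippet below is only a minimal, hypothetical sketch; it assumes the `match()` function available in [conditional execution]({{site.baseurl}}/docs/pipelines/conditional-execution-of-steps/) expressions, and runs a step only when the branch name starts with `feature-`:
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  only_for_feature_branches:
+    title: Runs only for feature branches
+    image: alpine
+    commands:
+      # CF_BRANCH holds the branch that triggered this build
+      - echo "Building branch ${{CF_BRANCH}}"
+    when:
+      condition:
+        all:
+          # case-insensitive regex check on the branch name
+          isFeatureBranch: "match('${{CF_BRANCH}}', '^feature-', true) == true"
+{% endraw %}
+{% endhighlight %}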
+ +### Pull Requests from comments + +Pull Requests from comments are supported for all Git providers, for both private and public repositories. +There are two options: +* **Pull request comment added (restricted)** + This option triggers an event only when the PR comments are made by repository owners or collaborators. +* **Pull request comment added** + This option triggers an event when PR comments are made by any user, regardless of their permissions. + Because it is not restricted to owners and collaborators, this option is useful in GitHub, to enable triggers for PR comments made by users in GitHub teams. + + > We strongly recommend selecting this option only for _private repositories_. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/pr-comment-trigger-options.png" +url="/images/pipeline/triggers/pr-comment-trigger-options.png" +alt="Trigger options for PR comments" +caption="Trigger options for PR comments" +max-width="50%" +%} + + +### Support for building pull requests from forks + +By default, the Git trigger works for events coming from your personal repository. You can also use triggers from events that are coming from forks. This is a very useful feature for open source projects, as it allows you to run your own unit tests and other checks against a new feature *before* actually merging it in your repo. + +To enable this behavior: + +* Toggle the *support pull request events from forks* switch +* Select *Pull request comment added (restricted)* +* In the *pr comment* field enter a custom string (accepts regex) + +Then once a contributor creates a fork of your repository and submits a pull request, you can review the code and then add a comment on your own that matches the PR comment expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/pr-from-fork.png" +url="/images/pipeline/triggers/pr-from-fork.png" +alt="Triggering a public build from a comment" +caption="Triggering a public build from a comment" +max-width="50%" +%} + +Once that is done, Codefresh will launch your pipeline against the Pull Request. If you manage an open source project with Codefresh, remember to enable [public builds]({{site.baseurl}}/docs/configure-ci-cd-pipeline/build-status/#public-build-logs) as well. + +When supporting building of pull requests from forks there are a few "gotchas" to look out for: + +* Only comments made by repository owners and [collaborators](https://help.github.com/en/github/setting-up-and-managing-organizations-and-teams/adding-outside-collaborators-to-repositories-in-your-organization) will result in the pipeline being triggered. +* Only Git pushes by collaborators within the GitHub organization will result in the pipeline being triggered +* If the repository is in a GitHub organization, comments made by private members of the organization will not activate the trigger, even if they are set as an owner or collaborator. Private members means that they need to be explicitly added to the repository. +Access cannot be "inherited" by the GitHub team. Currently, only comments from Admins, or Collaborators (directly added, not via teams) are allowed, in order to be caught by this filter. +* The *Pull request comment added* checkbox should likely be the only one checked, or your pipeline may trigger on other events that you don't anticipate. 
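+
+When fork or comment-based triggers are enabled, it can be useful to record who actually started each build. The step below is a minimal sketch that simply prints the initiator and the pull request number, using the `CF_BUILD_INITIATOR` and `CF_PULL_REQUEST_NUMBER` variables documented in [Variables in CI pipelines]({{site.baseurl}}/docs/pipelines/variables/):
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  audit_build_initiator:
+    title: Log who triggered this build
+    image: alpine
+    commands:
+      # both variables are injected automatically as environment variables
+      - echo "Build started by ${{CF_BUILD_INITIATOR}}"
+      - echo "Pull request number is ${{CF_PULL_REQUEST_NUMBER}}"
+{% endraw %}
+{% endhighlight %}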
+ + + +### Monorepo support (Modified files) + +The *modified files* field is a very powerful Codefresh feature that allows you to trigger a build only if the +files affected by a commit are in a specific folder (or match a specific naming pattern). This means that +you can have a big GIT repository with multiple projects and build only the parts that actually change. + +>Currently the field *modified files* is available only for GitHub, GitLab, Azure DevOps and [Bitbucket Server and Data Center](https://confluence.atlassian.com/bitbucketserver/add-a-post-service-webhook-776640367.html) repositories, since they are the only GIT providers +that send this information in the webhook. We will support other GIT providers as soon as they add the respective feature. + +### Using the Modified files field to constrain triggers to specific folder/files + +The *modified files* field accepts glob expressions. The paths are relative to the root folder of the project (where the git repository was checked out). Some possible examples are: + +``` +**/package.json +**/Dockerfile* +my-subproject/** +my-subproject/sub-subproject/package.json +my-subproject/**/pom.xml +!config/** + +``` + +>You can also use relative paths with dot-slash. Therefore `./package.json` and `package.json` are exactly the same thing. They both refer to the file `package.json` found at the root of the git project that was checked out as part of the build. + +You can also define [multiple expressions](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm) like this (but notice that there is a limit of 150 characters for the field): + +``` +{app/**,test/**} +{**/package.json,my-subproject/**} +!{deployment/**,**/version.cfg} +``` + +Once a commit happens to a code repository, Codefresh will see which files are changed from the git provider and trigger the build **only** if the changed files match the glob expression. If there is no match no build will be triggered. + +> Notice that the `{}` characters are only needed if you have more than one expression. Do not use them if you have a single glob expression in the field. + +This is a very useful feature for organizations who have chosen to have multiple projects on the same GIT repository (monorepos). Let's assume for example that a single system has a Java backend, a NestJS frontend and a Ruby-on-Rails internal dashboard. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/monorepo.png" +url="/images/pipeline/triggers/monorepo.png" +alt="GIT monorepo" +max-width="60%" +%} + +Now we can define 3 different pipelines in Codefresh where each one builds the respective project + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/monorepo-pipelines.png" +url="/images/pipeline/triggers/monorepo-pipelines.png" +alt="GIT monorepo pipelines" +max-width="70%" +%} + +And then in the GIT trigger for each one we set the modified files field to the following values: + +* For the *build-nestjs-only* pipeline *MODIFIED FILES* has `my-nestjs-project/**`. +* For the *build-java-only* pipeline *MODIFIED FILES* has `my-java-project/**`. +* For the *build-rails-only* pipeline *MODIFIED FILES* has `my-rails-project/**`. + +This way as multiple developers work on the git repository only the affected projects will actually build. A change to the NestJS project will *not* build the Rails project as well. 
Also, if somebody changes *only* the README file and nothing else, no build will be triggered at all (which is a good thing as the source code is exactly the same). + +You can also use Glob expressions for files. For example: + +* An expression such as `my-subproject/sub-subproject/package.json` will trigger a build **only** if the dependencies of this specific project are changed +* A pipeline with the expression `my-subproject/**/pom.xml` will trigger only if the Java dependencies for any project that belongs to `my-subproject` actually change +* An expression such as `!config/manifest.yaml` will trigger a build if any file was changed *apart from* `config/manifest.yaml` + +Glob expressions have many more options not shown here. Visit the [official documentation](https://en.wikipedia.org/wiki/Glob_(programming)) to learn more. You can also use the [Glob Tester web application](https://www.digitalocean.com/community/tools/glob) to test your glob expressions beforehand so that you are certain they match the files you expect them to match. + +## Advanced Options + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/advanced-options.png" +url="/images/pipeline/triggers/advanced-options.png" +alt="Advanced Options" +max-width="60%" +%} + +* *Commit Status Title* - the commit status title pushed to the GIT version control system. By default, is the pipeline name, but you can override the name on GIT trigger. +* *Build Variables* - import a [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) or manually add variables +* *More Options* + * *Ignore Docker engine cache for build* - selecting this option may slow down your build. See #1 [here]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) + * *Ignore Codefresh cache optimizations for build* - selecting this option may slow down your build. See #2 [here]({{site.baseurl}}/docs/troubleshooting/common-issues/disabling-codefresh-caching-mechanisms/) + * *Reset pipeline volume* - useful for troubleshooting a build that hangs on the first step. See [here]({{site.baseurl}}/docs/troubleshooting/common-issues/restoring-data-from-pre-existing-image-hangs-on/) + * *Report notification on pipeline execution* - Decide if [Slack notifications]({{site.baseurl}}/docs/integrations/notifications/slack-integration/) will be sent (as well as status updates back to your Git provider) +* *Runtime Environment* - choose to use pipeline [settings]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-settings) or override them + +### Set minimum disk space for build volume by trigger +Set the disk space you need for the build volume in the context of the selected trigger. Setting the disk space for the trigger overrides that set for the pipeline. + +1. In **Workflow > Triggers**, expand **Advanced Options**. +1. From the Runtime Environment list, select **Override pipeline settings**, and then select the runtime for which to override the pipeline setting. +1. If required, change the resource size. +1. Enable **Set minimum disk space**, and then change as required. + +## Manually adding the trigger to GitHub + +When creating a Git trigger in codefresh, sometimes the Git Integration does not have the permissions to create a webhook on the designated repository. When this happens, you get the following error: `Failed to add Trigger`. + +This error means that Codefresh could not create the webhook and verify that it works. 
With that, Codefresh will mark the Trigger as Unverified. Two additional fields (Endpoint and Secret) will appear under the "Verify Trigger" button when you get this error. + +- **Endpoint**: This will be the Webhook URL for the created Trigger +- **Secret**: Token to add to Github for verification. + +### Adding Webhook to Github + +1. When you receive the `Failed to add Trigger`, log into GitHub. + - Make sure this user can access the repository settings and create Webhooks +1. Go to the repository mentioned in the "REPOSITORY" section from Unverified Trigger. +1. Go to Settings > Webhooks and click the "Add webhook" button. +1. Fill in the form + - **Payload URL**: The URL from the Endpoint field from the Trigger + - **Content type**: application/json + - **Secret**: The token in the Secret field from the Trigger + - **SSL verification**: Enable SSL verification + - **Events**: + 1. Select let me select individual events + 2. Match the items selected in the Trigger By field from the Trigger + - **Active**: Make sure this is selected +1. Click "Add webhook" when done. +1. Click "Done" in the Add Trigger form. +1. Test your webhook by making an event in the repository that will cause the Trigger to start the build. + +> **Note**: + * You will be responsible for syncing the Trigger By to the Events sent to us for the webhook. You can select "Send me everything" if you do not want to manually match the Trigger By in the Trigger with the Webhook Events in GitHub. + * The Trigger will remain "Unverified" until the integration has the correct permissions to the repository. + +## Accessing webhook content of the trigger directly + +If your Git trigger is coming from Github, you can also access the whole payload of the webhook that was responsible for the trigger. +The webhook content is available at `/codefresh/volume/event.json`. You can read this file in any pipeline step and process it like any other json file (e.g. with the jq utility). + +`codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + read_trigger_webook: + title: "Reading Github webhook content" + type: "freestyle" + image: "alpine:3.9" + commands: + - 'cat /codefresh/volume/event.json' +{% endraw %} +{% endhighlight %} + +Notice however that this file is only available when the pipeline was triggered from a GitHub event. If you manually run the pipeline, the file is not present. + +## Using YAML and the Codefresh CLI to filter specific Webhook events + +The default GUI options exposed by Codefresh are just a starting point for GIT triggers and pull requests. Using [Codefresh YAML]({{site.baseurl}}/docs/codefresh-yaml/what-is-the-codefresh-yaml/) and the [Codefresh CLI plugin](https://codefresh-io.github.io/cli/) you can further create two-phase pipelines where the first one decides +which webhook events will be honored and the second one contains the actual build. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/two-phase-pipeline.png" +url="/images/pipeline/triggers/two-phase-pipeline.png" +alt="Two phase pipeline" +max-width="80%" +%} + +The generic GIT trigger is placed on Pipeline A. This pipeline then filters the applicable webhooks using [conditional expressions]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/). Then it uses the Codefresh CLI plugin (and specifically the [run pipeline capability](https://codefresh-io.github.io/cli/pipelines/run-pipeline/)) to trigger pipeline B that performs build. 
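+
+If the exposed variables are not enough for the filtering decision, pipeline A can also inspect the raw webhook payload mentioned earlier (`/codefresh/volume/event.json`). The following is only a hedged sketch: it installs `jq` on the fly, and the `.action` field it reads assumes a GitHub pull request payload (field names differ per Git provider):
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  inspect_webhook:
+    title: Inspect raw webhook payload
+    image: alpine:3.9
+    commands:
+      # jq is not part of the base image, so install it first
+      - apk add --no-cache jq
+      # print the pull request action (e.g. opened, closed) from the GitHub payload
+      - jq -r '.action' /codefresh/volume/event.json
+{% endraw %}
+{% endhighlight %}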
+ +Some of the YAML variables that you might find useful (from the [full list]({{site.baseurl}}/docs/codefresh-yaml/variables/)): + +* `CF_PULL_REQUEST_ACTION` - open, close, synchronize, assign etc. +* `CF_PULL_REQUEST_TARGET` - target branch of the pull request. +* `CF_BRANCH` - the branch that contains the pull request. + +As an example, here is the `codefresh.yml` file of pipeline A where we want to run pipeline B only when a Pull Request is opened against a branch named *production*. + +`codefresh.yml` of pipeline A +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + triggerstep: + title: trigger + image: codefresh/cli + commands: + - 'codefresh run <pipeline_b_id> -b=${{CF_BRANCH}}' + when: + condition: + all: + validateTargetBranch: '"${{CF_PULL_REQUEST_TARGET}}" == "production"' + validatePRAction: '''${{CF_PULL_REQUEST_ACTION}}'' == ''opened''' +{% endraw %} +{% endhighlight %} + +This is the build definition for the first pipeline that has a GIT trigger (with the Pull request checkbox enabled). +It has only a single step which uses conditionals that check the name of the branch where the pull request is targeted to, as well as the pull request action. Only if *both* of these conditions are true then the build step is executed. + +The build step calls the second pipeline. The end result is that pipeline B runs only when the Pull Request is opened the first time. Any further commits on the pull request branch will **not** trigger pipeline B (pipeline A will still run but the conditionals will fail). + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Cron triggers]({{site.baseurl}}/docs/pipelines/triggers/cron-triggers/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[Multi-git trigger]({{site.baseurl}}/docs/troubleshooting/common-issues/multi-git-triggers/) diff --git a/_docs/pipelines/triggers/helm-triggers.md b/_docs/pipelines/triggers/helm-triggers.md new file mode 100644 index 00000000..98ede0e9 --- /dev/null +++ b/_docs/pipelines/triggers/helm-triggers.md @@ -0,0 +1,61 @@ +--- +title: "Helm Trigger" +description: "" +group: configure-ci-cd-pipeline +sub_group: triggers +toc: true +--- + +Codefresh has the option to create pipelines that respond to Helm events. For instance, one pipeline can be set up to create a Docker image and chart. Once those are created, another pipeline is triggered to implement the actual deployment. + +Define and manage Helm pipeline triggers with the Codefresh UI. + +## Create a new Helm Trigger + +To add a new Helm trigger, navigate to the Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select the `Helm` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="60%" +%} + +Fill the following information: +* *Helm Provider* - select `JFrog Artifactory`. +* *Repository* - enter the name of the Artifactory repository. +* *Chart Name* - enter the name of the chart in the Artifactory repository. +* *Action* - select `Push Chart` action. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/configure-artifactory.png" +url="/images/pipeline/triggers/jfrog/configure-artifactory.png" +alt="Helm Artifactory settings" +max-width="50%" +%} + +Click next and a new dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. 
+ + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +url="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set JFrog Artifactory to call this URL when an event takes place. This can either be done through the [JFrog Artifactory webhook plugin]({{site.baseurl}}/docs/pipelines/triggers/jfrog-triggers/) or through [setting up Webhooks](https://www.jfrog.com/confluence/display/JFROG/Webhooks) in the UI. + +## Trigger Codefresh pipeline with an Artifactory push + +Now, every time you push a Helm chart to the selected Artifactory repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Artifactory Push trigger event. + + +## Related articles +[Helm Releases management](https://codefresh.io/docs/docs/new-helm/helm-releases-management/) +[Custom Helm uploads](https://codefresh.io/docs/docs/new-helm/custom-helm-uploads/) +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) diff --git a/_docs/pipelines/triggers/jfrog-triggers.md b/_docs/pipelines/triggers/jfrog-triggers.md new file mode 100644 index 00000000..97471f4d --- /dev/null +++ b/_docs/pipelines/triggers/jfrog-triggers.md @@ -0,0 +1,101 @@ +--- +title: "Artifactory trigger" +description: "Trigger Codefresh pipelines from Artifactory" +group: configure-ci-cd-pipeline +sub_group: triggers +redirect_from: + - /docs/pipeline-triggers/configure-jfrog-trigger/ +toc: true +--- + +Define and manage Artifactory pipeline triggers with the Codefresh UI. +This allows you to trigger Codefresh pipelines when an Artifactory event occurs (i.e. a new Docker image is uploaded). + +## Manage Artifactory Triggers with Codefresh UI + + +The process involves two parts: + +1. Creating a trigger in Codefresh. This will result in a special Codefresh webhook URL +1. Activating the [webhook plugin](https://github.com/jfrog/artifactory-user-plugins/tree/master/webhook) in Artifactory and setting it up to call the Codefresh URL + +> Make sure that you have admin access to your Artifactory instance in order to setup its webhook plugin. + +### Create a new Artifactory trigger + +To add a new Artifactory trigger, navigate to a Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="40%" +%} + +Fill the following information: + +* *Registry Provider* - select `JFrog`. +* *Repository Name* - put JFrog name of repository. +* *Docker Image Name* - put name of Docker image. +* *Action* - select `Push Image` action. +* *Tag* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/configure-trigger.png" +url="/images/pipeline/triggers/jfrog/configure-trigger.png" +alt="Artifactory Registry settings" +max-width="50%" +%} + +Click next and a new Dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. 
+ + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +url="/images/pipeline/triggers/jfrog/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set JFrog Artifactory to call this URL when an event takes place. + +### Set up JFrog Artifactory webhook plugin + +The [webhook functionality](https://github.com/jfrog/artifactory-user-plugins/tree/master/webhook) in JFrog artifactory comes in plugin. +You can read [detailed documentation](https://www.jfrog.com/confluence/display/RTF/User+Plugins) for JFrog plugins but in summary: + +* The file `webhook.groovy` needs to be copied to `ARTIFACTORY_HOME/etc/plugins` (the plugin itself) +* A file `webhook.config.json` should also be placed in the same folder (the plugin setup) + +Here is an example for Codefresh. + +`webhook.config.json` +{% highlight json %} +{% raw %} +{ + "webhooks": { + "mywebhook": { + "url": "https://g.codefresh.io/nomios/jfrog?account=2dfdf89f235bfe&sefgt=EvQf9bBS55UPekCu", + "events": [ + "docker.tagCreated" + ] + } + }, + "debug": false, + "timeout": 15000 +} +{% endraw %} +{% endhighlight %} + + + +### Trigger a Codefresh pipeline with an Artifactory push + +Now, every time you push/tag a Docker image to the selected Artifactory repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Artifactory Push trigger event. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/pipelines/triggers/quay-triggers.md b/_docs/pipelines/triggers/quay-triggers.md new file mode 100644 index 00000000..1e7e275f --- /dev/null +++ b/_docs/pipelines/triggers/quay-triggers.md @@ -0,0 +1,102 @@ +--- +title: "Quay Trigger" +description: "Trigger Codefresh pipelines from Quay" +group: pipelines +sub_group: triggers +redirect_from: + - /docs/pipeline-triggers/configure-quay-trigger/ +toc: true +--- + +Define and manage Quay triggers for pipelines with the Codefresh UI. +This allows you to trigger Codefresh pipelines when a Quay event happens (e.g. a new Docker image is uploaded). + +## Manage Quay triggers with Codefresh UI + + +The process involves two parts: + +1. Creating a trigger in Codefresh (this will result in a special Codefresh webhook URL) +1. Creating a new notification in Quay that will use this URL to call Codefresh + +> Make sure that you have a Quay account and have already [created a repository](https://docs.quay.io/guides/create-repo.html) (or pushed a Docker image at least once). + + +### Create a new Quay Trigger + +To add a new Quay trigger, navigate to a Codefresh Pipeline *Configuration* view and expand the *Triggers* section. Press the `Add Trigger` button and select a `Registry` trigger type to add. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/add-trigger-dialog.png" +url="/images/pipeline/triggers/add-trigger-dialog.png" +alt="Adding new Trigger dialog" +max-width="40%" +%} + +Fill the following information: + +* *Registry Provider* - select `Quay`. +* *User/Organization Name* - put Quay username or organization name here. +* *Image Repository Name* - Quay image repository name. +* *Action* - select `Push Image` action. +* *Tag* - optional filter to specify which image *tags* will trigger pipeline execution: [Re2](https://github.com/google/re2/wiki/Syntax) regular expression. 
+ +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/add-trigger-dialog.png" +url="/images/pipeline/triggers/quay/add-trigger-dialog.png" +alt="Quay Registry settings" +max-width="50%" +%} + +Click next and a new Dialog will appear that shows you the Codefresh webhook URL. Copy it to your clipboard. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/view-trigger-dialog.png" +url="/images/pipeline/triggers/quay/view-trigger-dialog.png" +alt="Codefresh webhook URL" +max-width="50%" +%} + +Now we must set Quay to call this URL when an event takes place. + +### Set up Quay notification + +Log in your Quay account and go to the respective repository. You can also click the link shown in the Codefresh dialog to go directly to the settings of that repository. + +Scroll down and under *Events and Notifications* click *Create Notification*. + + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/add-quay-notification.png" +url="/images/pipeline/triggers/quay/add-quay-notification.png" +alt="Add Quay Notification" +max-width="50%" +%} + +In the new screen select *Push to repository* from the drop-down or any other event that you wish the Codefresh pipeline to trigger. + +{% include image.html +lightbox="true" +file="/images/pipeline/triggers/quay/edit-quay-notification.png" +url="/images/pipeline/triggers/quay/edit-quay-notification.png" +alt="Edit Quay Notification" +max-width="50%" +%} + +From the next dropdown choose *Webhook Post*. In the *Webhook URL entry* paste the Codefresh URL that was created in the Codefresh Trigger dialog. + +Finally click *Create Notification*. + + +### Triggering a Codefresh pipeline with Quay push + +Now, every time you push a new Docker image to the selected Quay repository, manually, with Codefresh or any other CI/CD tool, Codefresh will trigger execution of all pipelines associated with that Quay Push trigger event. + +## Related articles +[Triggers for pipelines]({{site.baseurl}}/docs/pipelines/triggers) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) \ No newline at end of file diff --git a/_docs/pipelines/using-secrets.md b/_docs/pipelines/using-secrets.md deleted file mode 100644 index 58204057..00000000 --- a/_docs/pipelines/using-secrets.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "Using secrets" -description: "" -group: pipelines -toc: true ---- - -Coming soon diff --git a/_docs/pipelines/variables.md b/_docs/pipelines/variables.md new file mode 100644 index 00000000..bfd05a42 --- /dev/null +++ b/_docs/pipelines/variables.md @@ -0,0 +1,338 @@ +--- +title: "Variables in CI pipelines" +description: "" +group: pipelines +redirect_from: + - /docs/variables/ +toc: true +--- +Codefresh provides a set of predefined variables automatically in each build, that you can use to parameterize the way your pipeline works. You can also define your own variables. Some common examples of predefined variables include: + +* `CF_BRANCH` is the Git branch that was used for this pipeline. +* `CF_REVISION` is the Git hash that was used for this pipeline. +* `CF_BUILD_URL` is the url of the pipeline build. + +## Using Codefresh variables in your pipelines + +There are two ways to use a Codefresh variable in your pipelines: + +1. By default all variables will be exposed as UNIX environment variables in all freestyle steps as `$MY_VARIABLE_EXAMPLE`. +1. Variables can be used in YAML properties with the syntax {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %}. 
+ +> If you are unsure about which form you need to use, feel free to use {% raw %}`${{MY_VARIABLE_EXAMPLE}}`{% endraw %} everywhere. This is the Codefresh specific form and should function in all sections of `codefresh.yml`. + +For example, you can print out the branch as an environment variable like this: + +`YAML` +{% highlight yaml %} +{% raw %} +MyOwnStep: + title: Variable example + image: alpine + commands: + - echo $CF_BUILD_ID + - echo $CF_BRANCH_TAG_NORMALIZED +{% endraw %} +{% endhighlight %} + +In the example above we are using simple `echo` commands, but any program or script that reads environment variables could also read them in the same manner. + +Using variables directly in yaml properties can be done like this: + +`YAML` +{% highlight yaml %} +{% raw %} +MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + tag: ${{CF_BRANCH_TAG_NORMALIZED}} +{% endraw %} +{% endhighlight %} + +You can also concatenate variables: + +`YAML` +{% highlight yaml %} +{% raw %} +MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + tag: ${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}} +{% endraw %} +{% endhighlight %} + +This will create docker images with tags such as: + +``` +master-df6a04c +develop-ba1cd68 +feature-vb145dh +``` + + + + +Notice that this syntax is specific to Codefresh and is **only** available within the Codefresh YAML file itself. If you want to write scripts or programs that use the Codefresh variables, you need to make them aware of the environment variable form. + + +## System variables + +System variables are automatically injected to any freestyle step as environment variables. + +> It is important to understand that all Git related variables such `CF_BRANCH`, `CF_COMMIT_MESSAGE`, `CF_REVISION` etc. are coming directly from the Git provider you use and have the same limitations of that provider. For example GitLab is sending less information in pull request events than normal pushes, and Bitbucket sends only the short hash of a commit in pull request events. We suggest you read the documentation of your Git provider first to understand what information is available for every Git event + +{: .table .table-bordered .table-hover} +| Variable | Description | +| ------------------------------------------------- | ------------------------------------------------------ | +| {% raw %}`${{CF_REPO_OWNER}} `{% endraw %} | Repository owner. | +| {% raw %}`${{CF_REPO_NAME}}`{% endraw %} | Repository name. | +| {% raw %}`${{CF_BRANCH}}`{% endraw %} | Branch name (or Tag depending on the payload json) of the Git repository of the main pipeline, at the time of execution.
                                                        You can also use {% raw %}`${{CF_BRANCH_TAG_NORMALIZED}}`{% endraw %} to get the branch name normalized. It will be without any chars that are illegal in case the branch name were to be used as the Docker image tag name. You can also use {% raw %}`${{CF_BRANCH_TAG_NORMALIZED_LOWER_CASE}}`{% endraw %} to force lowercase. | +| {% raw %}`${{CF_BASE_BRANCH}}`{% endraw %} | The base branch used during creation of Tag | +| {% raw %}`${{CF_PULL_REQUEST_ACTION}}`{% endraw %} | The pull request action. Values are those defined by your Git provider such as [GitHub](https://developer.github.com/webhooks/), [GitLab](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html), [Bitbucket](https://confluence.atlassian.com/bitbucket/manage-webhooks-735643732.html) etc. | +| {% raw %}`${{CF_PULL_REQUEST_TARGET}}`{% endraw %} | The pull request target branch | +| {% raw %}`${{CF_PULL_REQUEST_NUMBER}}`{% endraw %} | The pull request number | +| {% raw %}`${{CF_PULL_REQUEST_ID}}`{% endraw %} | The pull request id | +| {% raw %}`${{CF_PULL_REQUEST_LABELS}}`{% endraw %} | The labels of pull request (GitHub and GitLab only) | +| {% raw %}`${{CF_COMMIT_AUTHOR}}`{% endraw %} | Commit author. | +| {% raw %}`${{CF_BUILD_INITIATOR}}`{% endraw %} | The person (username) that started the build. If the build was started by a Git webhook (e.g. from a Pull request) it will hold the webhook user. Notice that if a build is restarted manually it will always hold the username of the person that restarted it. | +| {% raw %}`${{CF_ACCOUNT}}`{% endraw %} | Codefresh account for this build | +| {% raw %}`${{CF_COMMIT_URL}}`{% endraw %} | Commit url. | +| {% raw %}`${{CF_COMMIT_MESSAGE}}`{% endraw %} | Commit message of the Git repository revision, at the time of execution.
                                                        The messages quotes are escaped (i.e. ' is not \', " is now \"). | +| {% raw %}`${{CF_COMMIT_MESSAGE_ESCAPED}}`{% endraw %} | Commit message of the Git repository revision, at the time of execution.
                                                        Special characters are escaped. | +| {% raw %}`${{CF_REVISION}}`{% endraw %} | Revision of the Git repository of the main pipeline, at the time of execution.
                                                        You can also use {% raw %}`${{CF_SHORT_REVISION}}`{% endraw %} to get the abbreviated 7-character revision hash, as used in Git. Note: use this variable as string with quotes to tag the image {% raw %}`${{CF_SHORT_REVISION}}`{% endraw %} | +| {% raw %}`${{CF_VOLUME_NAME}}`{% endraw %} | Refers to the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) between [freestyle steps]({{site.baseurl}}/docs/pipelines/steps/freestyle/). Normally you only need to define this in [compositions]({{site.baseurl}}/docs/pipelines/steps/composition/). In freestyle steps, it is automatically present without any extra configuration. | +| {% raw %}`${{CF_VOLUME_PATH}}`{% endraw %} | Refers to the mounted path of the [shared volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) inside a Freestyle container. In the current implementation it expands to `/codefresh/volume`. | +| {% raw %}`${{CF_BUILD_TRIGGER}}`{% endraw %} | Will be an indication of the current build was triggered: *build: The build was triggered from the build button* webhook: The build was triggered from a control version webhook | +| {% raw %}`${{CF_BUILD_ID}}`{% endraw %} | The build id. Note: use this variable as string with quotes to tag the image {% raw %}`${{CF_BUILD_ID}}`{% endraw %} | +| {% raw %}`${{CF_BUILD_TIMESTAMP}}`{% endraw %} | The timestamp the build was created. Note: use this variable as string with quotes to tag the image {% raw %}`${{CF_BUILD_TIMESTAMP}}`{% endraw %} | +| {% raw %}`${{CF_BUILD_URL}}`{% endraw %} | The URL to the build in Codefresh | +| {% raw %}`${{CF_PIPELINE_NAME}}`{% endraw %} | The full path of the pipeline, i.e. "project/pipeline" | +| {% raw %}`${{CF_STEP_NAME}}`{% endraw %} | the name of the step, i.e. "MyUnitTests" | +| {% raw %}`${{CF_URL}}`{% endraw %} | The URL of Codefresh system | +| {% raw %}`${{CI}}`{% endraw %} | The value is always `true` | +| {% raw %}`${{CF_KUBECONFIG_PATH}}`{% endraw %} | Path to injected kubeconfig if at least one Kubernetes cluster [is configured]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/). You can easily run [custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/) since it is automatically setup by Codefresh in all pipelines. | +| Any variable specified in the pipeline settings | For example, if you configure the pipeline settings with a variable named PORT, you can put the variable in your YAML build descriptor as {% raw %}`${{PORT}}`{% endraw %}. | + +## Context-related Variables +Context-related variables are created dynamically during the workflow execution and according to the used steps. + +{: .table .table-bordered .table-hover} +| Variable | Description | +| ------------------------------------------------- | ------------------------------------------------------ | +| **Working Directories** | For example, you can set the working directory of step `A` with a variable named after a previously executed step, step `B`. Therefore, setting step `A` with {% raw %}`working-directory:${{B}}`{% endraw %} means that step `A` executes in the same working directory as step `B`. | +| **Images** | You can set the candidate field of the push step with a variable named after a previously executed build step. 
Since the details of a created image are not necessarily known ahead of time, the variable can create an association to an optionally dynamic image name. Therefore, setting push step `A` with {% raw %}`candidate:${{B}}`{% endraw %} means that step `A` will push the image built by step `B`. Note that this capability works only for `candidate` and `image` fields in Codefresh steps. | + +A very common pattern in Codefresh pipelines, is to create a Docker image in one step, and then run a command on its container in the next step (e.g. run [unit tests]({{site.baseurl}}/docs/testing/unit-tests/)): + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - ./my-unit-tests.sh +{% endraw %} +{% endhighlight %} + +In the example above you can see the `MyAppDockerImage` variable that denotes a Docker image created dynamically within this single pipeline. In the second step we use it as a Docker context in order to run unit tests. See also the [unit testing example app]({{site.baseurl}}/docs/yaml-examples/examples/run-unit-tests/). + +## Step variables + +Every [step]({{site.baseurl}}/docs/pipelines/steps/) in a Codefresh pipeline also exposes several built-in variables. You can access them via the global `steps` parent variable. + + * Each step creates a variable based on the name of the step. You can then use the members of each variable for status conditions such as: `steps.MyUnitTests.result == 'error'` for a step called `MyUnitTests`. + * To access variables that have a non-standard (i.e. only alphanumeric and _ characters) names, use the Variable() function. + +### Step Member variables + +Variables that are created by steps can have members. The members depend on the step type. For example if you have a build step named `myBuildStep` you can get the ID of the docker image that gets created with {% raw %}`echo ${{steps.myBuildStep.imageId}}`{% endraw %} + +{: .table .table-bordered .table-hover} +| Step Type | Members | +| ----------------------- | -------------------------------------- | +| All step types | {::nomarkdown}
                                                        • name
                                                        • type
                                                        • description
                                                        • workingDirectory
                                                        • result
                                                        {:/} +| [**Freestyle**]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) | - | +| [**Composition**]({{site.baseurl}}/docs/codefresh-yaml/steps/composition/) | - | +| [**Build**]({{site.baseurl}}/docs/codefresh-yaml/steps/build/) | {::nomarkdown}
                                                        • imageName
                                                        • imageTagName
                                                        • imageId
                                                        {:/} | +| [**Git-clone**]({{site.baseurl}}/docs/codefresh-yaml/steps/git-clone/) | {::nomarkdown}
                                                        • revision
                                                        • repo
                                                        {:/} | +| [**Push**]({{site.baseurl}}/docs/codefresh-yaml/steps/push/) | {::nomarkdown}
                                                        • registry
                                                        • imageId
                                                        • imageRepoDigest
                                                        {:/} | +| [**Approval**]({{site.baseurl}}/docs/codefresh-yaml/steps/approval/) | {::nomarkdown}
                                                        • authEntity.name
                                                        • authEntity.type
                                                        {:/} | + + + +## GitHub release variables + +GitHub allows you to create [releases](https://help.github.com/articles/creating-releases/) for marking specific Git tags for general availability. + +You can set a [trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/) for GitHub releases. When a GitHub release happens, the following variables are also available: + + + +{: .table .table-bordered .table-hover} +| Variable | Description | +| --------------- | ------------------------------------------------------ | +| {% raw %}`${{CF_RELEASE_NAME}}`{% endraw %} | GitHub release title | +| {% raw %}`${{CF_RELEASE_TAG}}`{% endraw %} | Git tag version | +| {% raw %}`${{CF_RELEASE_ID}}`{% endraw %} | Internal ID for this release | +| {% raw %}`${{CF_PRERELEASE_FLAG}}`{% endraw %} | true if the release if marked as non-production ready, false if it is ready for production | + +## GitHub Pull Request variables + +When a pull request is closed in GitHub, the following variables are also available + +{: .table .table-bordered .table-hover} +| Variable | Description | +| --------------- | ------------------------------------------------------ | +| {% raw %}`${{CF_PULL_REQUEST_MERGED}}`{% endraw %} | true if the pull request was merged to base branch | +| {% raw %}`${{CF_PULL_REQUEST_HEAD_BRANCH}}`{% endraw %} | the head branch of the PR (the branch that we want to merge to master) | +| {% raw %}`${{CF_PULL_REQUEST_MERGED_COMMIT_SHA}}`{% endraw %} | the commit SHA on the base branch after the pull request was merged (in most cases it will be master) | +| {% raw %}`${{CF_PULL_REQUEST_HEAD_COMMIT_SHA}}`{% endraw %} | the commit SHA on the head branch (the branch that we want to push) | + +## User-defined variables + +User variables can be defined at 6 levels: + +1. Manually within a step using the [export](http://linuxcommand.org/lc3_man_pages/exporth.html) command or in any **subsequent** step with the [cf_export]({{site.baseurl}}/docs/codefresh-yaml/variables/#using-cf_export-command) command +1. [Freestyle Step Definition]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/#examples) (using the `environment` field) +1. Specific build Execution (after clicking the "Build" button open the "Build Variables" section, or use the [CLI]({{site.baseurl}}/docs/integrations/codefresh-api/#example---triggering-pipelines)) +1. Pipeline Definition (under "Environment variables" section in the [pipeline view]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#creating-new-pipelines)) +1. [Shared Configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) (defined under your account settings, and used using the "Import from shared configuration" button under the "Environment Variables" section in the pipeline view) +1. Variables defined on the Project level (Under the variables tab on any project view) + +The options are listed in order of priority (from the most important to the least important), so in case of multiple variables defined at different locations with the same name, the order of overriding will be as listed here. + +For example if a pipeline variable is defined both in project level and as an execution parameter of a specific build, then the final result will be the value defined as a build parameter and the project level variable will not take effect. 
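+For instance, no matter at which of these levels a variable is defined, it is referenced in the pipeline definition with the same syntax. The snippet below is only a minimal sketch, assuming a variable named `MY_API_URL` was defined in the pipeline settings (the variable name and value are illustrative):
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  print_settings:
+    title: Using a user-defined variable
+    image: alpine:latest
+    commands:
+      # MY_API_URL is resolved by Codefresh according to the precedence order described above
+      - echo "Target API is ${{MY_API_URL}}"
+{% endraw %}
+{% endhighlight %}
+
+If the same name is also passed as a build execution variable, the build-time value wins, as per the priority order listed above.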
+ +## Exporting environment variables from a freestyle step + +Steps defined inside steps are scoped to the step they were created in (even if you used the `export` command). In order to allow using variables across steps, we provide a shared file that facilitates variables importing and exporting. There are two ways to add variables to this file: + +### Using cf_export command +Within every freestyle step, the `cf_export` command allows you to export variables across steps (by writing to the shared variables file). + +> The variables exported with cf_export overrides those at the pipeline-level. + +You can either: +- Explicitly state a VAR=VAL pair +- State the name of an existing *exported* environment variable (like EXISTING_VAR). + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + freestyle-step-1: + description: Freestyle step.. + title: Free styling + image: alpine:latest + commands: + # Normal export will only work in a single step + - export EXISTING_VAR=www.example.com + + # CF export will now work in all other subsequent steps + - cf_export VAR1=alpine:latest VAR2=VALUE2 EXISTING_VAR + + freestyle-step-2: + description: Freestyle step.. + title: Free styling 2 + image: ${{VAR1}} + commands: + - echo $VAR2 + - echo http://$EXISTING_VAR/index.php +{% endraw %} +{% endhighlight %} + +Notice that `cf_export` has the same syntax structure as the [bash export command](https://www.gnu.org/software/bash/manual/html_node/Environment.html). This means that when you use it you **don't** need any dollar signs for the variable created/assigned. + +``` +cf_export $MY_VAR # Don't do this +cf_export MY_VAR # Correct syntax +``` + +Also notice that `cf_export` works on *subsequent* steps only. If you want to export a variable right away in the present step and all the rest of the steps you need to do the following: + +``` +export MY_VAR='example' # Will make MY_VAR available in this step only +cf_export MY_VAR='example' # Will also make MY_VAR available to all steps after this one +``` + +There is nothing really magic about `cf_export`. It is a normal script. You can see its contents on your own by entering the command `cat /codefresh/volume/cf_export` on any [Codefresh freestyle step]({{site.baseurl}}/docs/codefresh-yaml/steps/freestyle/) inside a pipeline. + +For more information on its limitations see the [troubleshooting page]({{site.baseurl}}/docs/troubleshooting/common-issues/cf-export-limitations/). + + + +### Directly writing to the file + +For more advanced use cases, you can write directly to the shared variable file that Codefresh reads to understand which variables need to be available to all steps. This file has a simple format where each line is a variable and its value in the form of `VARIABLE=VALUE`. The `cf_export` command mentioned in the previous section is just a shorthand for writing on this file. + +The variables file is available inside freestyle steps in the following path: **`{% raw %}${{CF_VOLUME_PATH}}{% endraw %}/env_vars_to_export`** + +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + freestyle-step-1: + description: Freestyle step.. + title: Free styling + image: alpine:latest + commands: + - echo VAR1=192.168.0.1 >> ${{CF_VOLUME_PATH}}/env_vars_to_export + - echo hey=alpine:3.9 >> ${{CF_VOLUME_PATH}}/env_vars_to_export + + freestyle-step-2: + description: Freestyle step.. 
+ title: Free styling 2 + image: ${{hey}} + commands: + - echo http://$VAR1/index.php +{% endraw %} +{% endhighlight %} + +Use this technique if you have complex expressions that have issues with the `cf_export` command. + +## Masking variables in logs + +Codefresh has the built-in capabililty to automatically mask variables in logs if they are encrypted. The values of encrypted variables will be replaced with asterisks in build logs. + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/variables/masked-variables.png" +url="/images/pipeline/codefresh-yaml/variables/masked-variables.png" +alt="Masked variables" +caption="Masked variables" +max-width="80%" +%} + +The variables can be defined in any of the usual ways Codefresh offers such as [shared configuration]({{site.baseurl}}/docs/configure-ci-cd-pipeline/shared-configuration/) or [within the pipeline]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/#pipeline-settings): + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/variables/encrypted-variables.png" +url="/images/pipeline/codefresh-yaml/variables/encrypted-variables.png" +alt="Encrypted variables" +caption="Encrypted variables" +max-width="60%" +%} + +>Notice that this feature is currently available only in Enterprise accounts. + + +## Escape characters +When passing special characters through environmental variables `\` can be used as an escape character. For example if you were passing a cassandra connection string you might do something like `Points\=hostname\;Port\=16376\;Username\=user\;Password\=password` + +This will safely escape `;` and `=`. + +## Related articles +[Pipeline steps]({{site.baseurl}}/docs/codefresh-yaml/steps/) +[Codefresh Conditionals]({{site.baseurl}}/docs/codefresh-yaml/conditional-execution-of-steps/) diff --git a/_docs/pipelines/what-is-the-codefresh-yaml.md b/_docs/pipelines/what-is-the-codefresh-yaml.md new file mode 100644 index 00000000..4be99e30 --- /dev/null +++ b/_docs/pipelines/what-is-the-codefresh-yaml.md @@ -0,0 +1,378 @@ +--- +title: "Pipeline definitions YAML" +description: "How to define Codefresh pipelines in a declarative manner" +group: pipelines +redirect_from: + - /docs/codefresh-yaml/ + - /docs/what-is-the-codefresh-yaml + - /docs/what-is-the-codefresh-yaml/ + - /docs/codefresh-yaml/working-directories/ + - /docs/working-directories/ +toc: true +--- + +Codefresh offers its own built-in format for creating pipelines. The pipeline specification is +based on the YAML syntax allowing you to describe your pipelines in a completely declarative manner. + +Using Codefresh yaml is the recommended way to [create pipelines]({{site.baseurl}}/docs/pipelines/pipelines/). + +## Simple example for codefresh.yml + +Here is a very minimal example: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + build_image: + type: build + description: Building the image... + image-name: myuser/myservice + tag: develop # {% raw %}${{CF_BRANCH}}{% endraw %} + + perform_tests: + image: node:5 + working_directory: {% raw %}${{main_clone}}{% endraw %} + description: Performing unit tests... + commands: + - npm install gulp -g + - npm install + - gulp unit_test +{% endhighlight %} + +It contains two [steps]({{site.baseurl}}/docs/pipelines/steps/), one named *build_image* that creates a docker image, and another one called *perform_tests* that runs unit test with `gulp`. 
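+A common variation of the example above is to run the tests inside the image that was just built, by referencing the build step by its name. This is only a sketch (step and image names are illustrative); step references and variables are explained in detail in [Variables]({{site.baseurl}}/docs/pipelines/variables/):
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  build_image:
+    type: build
+    description: Building the image...
+    image_name: myuser/myservice
+
+  perform_tests:
+    description: Performing unit tests inside the freshly built image...
+    # The image field references the build_image step by name
+    image: ${{build_image}}
+    commands:
+      - npm install
+      - gulp unit_test
+{% endraw %}
+{% endhighlight %}
+
+This is the same pattern used throughout this guide, where the image of a freestyle step is the output of a previous build step.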
+ +If you want to know more about how steps work in Codefresh make sure to read [the introduction to pipelines]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/) first, before moving on. + +## Basic pipeline syntax + +You can customize your build environment (pipeline) by using the Codefresh YAML file, ```codefresh.yml```. Codefresh uses the build specifications in the ```codefresh.yml``` file to execute your build. The ```codefresh.yml``` can be basic or it can include intricate build specifications. + +A YAML file is comprised of a series of steps that are executed in the order in which they are specified. + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' + +steps: + step-name: + [step-contents] + another-step: + [step-contents] + the-very-last-step: + [step-contents] +{% endhighlight %} + +You must define a step type for each step, unless you are using a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/). Each step uses Docker images and containers as facilitators for execution. For example, the **Freestyle** step spins up a container and executes the specified shell commands from the YAML file. + +The step names should be unique within the same pipeline. This mainly affects the visualization of the pipeline when it runs. + +Each step produces a resource, which you can [reference](https://github.com/codefresh-contrib/python-flask-sample-app/blob/master/codefresh.yml#L23) in other steps, and are executed in real-time. For example, a **Freestyle** step can reference an image that was produced by a [**Build**]({{site.baseurl}}/docs/pipelines/steps/build/) step. This allows you to chain steps together and create highly-customized builds. + + +##### Variables + +Steps chaining and referencing is possible due to implementation of variables in the YAML file - read more on relevant [section]({{site.baseurl}}/docs/pipelines/variables/). + + +{: .table .table-bordered .table-hover} +| Step Type | Description | +| ----------------------------------------------------------------------------------------------------------------- | ---------------------------------------------- | +| [Freestyle]({{site.baseurl}}/docs/pipelines/steps/freestyle/) | Executes one or more shell commands in a container similar to `docker run`. | +| [Build]({{site.baseurl}}/docs/pipelines/steps/build/) | Builds a Docker image like `docker build`. | +| [Push]({{site.baseurl}}/docs/pipelines/steps/push/) | Pushes a Docker image to an external registry similar to `docker tag` and `docker push`. | +| [Git Clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) | Overrides the default git clone behavior. | +| [Composition]({{site.baseurl}}/docs/pipelines/steps/composition/) | Starts a Docker Composition like `docker-compose`. Discarded once pipelines finishes. | +| [Launch Composition]({{site.baseurl}}/docs/pipelines/steps/launch-composition/) | Starts a long term Docker composition that stays up after the end of the pipeline. | +| [Deploy]({{site.baseurl}}/docs/pipelines/steps/deploy/) | Deploys to Kubernetes clusters. | +| [Approval]({{site.baseurl}}/docs/pipelines/steps/approval/) | Pauses a pipeline and waits for human intervention. | + + +For more information on creating your own step, see the [Steps in piplines]({{site.baseurl}}/docs/pipelines/steps/). + +You can also see the [full YAML specification]({{site.baseurl}}/docs/integrations/codefresh-api/#full-pipeline-specification) supported for pipelines. 
Note however that several fields are only accessible by using the [Codefresh API]({{site.baseurl}}/docs/integrations/codefresh-api) or [CLI](https://codefresh-io.github.io/cli/). + +## Yaml validation + +If you are editing Codefresh yaml within the Codefresh UI, the editor will automatically highlight errors as they happen. + +This allows you to make quick edits (and possibly run some builds) straight from the GUI. Once you are happy with your pipeline you should commit it to your repository as `codefresh.yml` (pipeline as code). + +{% include +image.html +lightbox="true" +file="/images/pipeline/codefresh-yaml/inline-editor.png" +url="/images/pipeline/codefresh-yaml/inline-editor.png" +alt="Graphical Inline Yaml Editor" +caption="Graphical Inline Yaml Editor" +max-width="50%" +%} + +You can also validate the pipeline yaml outside of the UI by using the [Codefresh CLI](https://codefresh-io.github.io/cli/). The CLI has a [validate parameter](https://codefresh-io.github.io/cli/validation/) that can check one or more files for syntax errors + +{% highlight shell %} +{% raw %} +$ codefresh validate codefresh.yml +Yaml not valid: + - "invalid-property" is not allowed +{% endraw %} +{% endhighlight %} + +For more information on where the YAML file can be stored see the [creating pipelines page]({{site.baseurl}}/docs/configure-ci-cd-pipeline/pipelines/). + +## Execution flow + +By default, Codefresh will execute all steps in the yaml file and instantly fail the build, if any step +presents an error. To change this behavior add the `fail_fast:false` property in any step that you wish to be ignored +in case of errors. + +For example, if you have a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/) that runs integration tests, and you don't want the whole pipeline +to fail if any of the tests fail, add the `fail_fast` line to that step: + + +{% highlight yaml %} +perform_tests: + image: node:9 + description: Running integration tests + fail_fast: false + commands: + - gulp integration_test +{% endhighlight %} + +Now the pipeline will continue to run even if the step `perform_tests` fails. + +Notice also that by default Codefresh pipelines run in *sequential mode*. All steps will be executed one after +the other and in the same order as included in the `codefresh.yml` file. + +If you wish to use parallel steps in your pipelines, see the [parallel steps]({{site.baseurl}}/docs/pipelines/advanced-workflows/) page. + +## Working directories + +In the context of a step, a working directory can be of the following type: + +{: .table .table-bordered .table-hover} +| Working Directory | Description | +| --------------------- | -------------------------------------------- | +| Empty | Defaults to the [Codefresh volume]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#sharing-the-workspace-between-build-steps) (found at `/codefresh/volume`). If there is a [git clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) with the special name `main_clone` then the default working directory for built-in steps is now the [project folder]({{site.baseurl}}/docs/pipelines/introduction-to-codefresh-pipelines/#cloning-the-source-code) that was checked out - this only applies to [built-in]({{site.baseurl}}/docs/pipelines/steps/#built-in-steps) Codefresh steps and not [custom plugins]({{site.baseurl}}/docs/pipelines/steps/#creating-a-typed-codefresh-plugin). 
| +| Variable that contains the ID of a [Git-Clone]({{site.baseurl}}/docs/pipelines/steps/git-clone/) step | Runs the step within the cloned directory. | +| Variable that contains the ID of any other step | Runs the step within the same working directory in which the specified step was executed. This option is not available for [**Git-Clone**]({{site.baseurl}}/docs/pipelines/steps/git-clone/) steps. | +| Absolute filesystem path | Treated as is within the container. | +| Relative filesystem path | Treated as a relative path from the cloned directory of the service. | +| 'IMAGE_WORK_DIR' | Use this value to use the image's working directory, for example:<br>
                                                        `working_directory: IMAGE_WORK_DIR` | + + +## Retrying a step + +Sometimes you want to retry a step that has a problem. Network hiccups, transient failures and flaky test environments are common problems that prevent pipelines from working in a predictable manner. + +Codefresh allows you to retry any of your steps with the built-in syntax: + + `yaml` +{% highlight yaml %} +{% raw %} +step-name: + [step-contents] + retry: + maxAttempts: 5 + delay: 5 + exponentialFactor: 2 +{% endraw %} +{% endhighlight %} + +The `retry:` block has the following parameters: + + * `maxAttempts` defines how many times this step will run again if there are execution errors (default is 1 and the Max. is 10). + * `delay` is the number of seconds to wait before each attempt (default is 5 seconds and the Max. is 60 seconds). + * `exponentialFactor` defines how many times the delay should be multiplied by itself after each attempt (default is 1 and Max. is 5). + +All parameters are optional. The exponentialFactor works like this: +* exponentialFactor=1, delay=5 => each time wait 5 seconds before trying again, no matter the number of attempts. +* exponentialFactor=2, delay=5 => first retry will have a delay of 25 seconds, third will have 125 and so on. + + +Here is a full example: + + `codefresh.yml` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + MyAppDockerImage: + title: Building Docker Image + type: build + image_name: my-own-app + retry: + maxAttempts: 2 + MyUnitTests: + title: Running Unit tests + image: ${{MyAppDockerImage}} + commands: + - ./my_unit_tests.sh + retry: + maxAttempts: 3 + delay: 5 + PushingToRegistry: + type: push + title: Pushing To Registry + candidate: ${{MyAppDockerImage}} + tag: '${{CF_BRANCH}}' + retry: + maxAttempts: 3 + delay: 3 + exponentialFactor: 2 +{% endraw %} +{% endhighlight %} + +Notice that Codefresh also provides the following variables that allow you change your script/applications according to the retry attempts: + +* `CF_CURRENT_ATTEMPT` contains the number of current retry attempt. +* `CF_MAX_ATTEMPTS` contains all the number of total attempts defined. + +The retry mechanism is available for all kinds of [steps]({{site.baseurl}}/docs/pipelines/steps/). + +## Escaping strings + +If you want to use strings inside your pipeline that create conflicts with the Codefresh syntax parser (for example they are YAML themselves) you need +to escape them using multi-line strings with the `>-` and `|-` characters. + +The following pipeline is not parsed correctly because the echo command is using the yaml `:` character + +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + test: + title: "Running test" + type: "freestyle" + image: "alpine:3.9" + commands: + - echo hello: world +{% endraw %} +{% endhighlight %} + +You can fix this issue by using a multi-line YAML string: + +{% highlight yaml %} +{% raw %} +version: "1.0" +steps: + test: + title: "Running test" + type: "freestyle" + image: "alpine:3.9" + commands: + - |- + echo hello: world +{% endraw %} +{% endhighlight %} + +The `|-` character keeps the line breaks of the text (but removes the last one). Use the `>-` character if you want to convert line breaks to spaces. +For more information see the [YAML specification](https://yaml.org/spec/1.2/spec.html). + +## Using YAML anchors to avoid repetition + +Codefresh also supports yaml anchors, references and extends. These allow you to keep +your pipeline [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself). 
+ +For example, let's say that you have two freestyle steps: + +1. The first one fills a MySQL server with data. +1. The second one runs integration tests that use the MySQL server. + +Here is the respective pipeline: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + preLoadDatabase: + title: Loading Data + image: alpine + commands: + - printenv + - echo "Loading DB" + environment: &my_common_envs + - MYSQL_HOST=mysql + - MYSQL_USER=user + - MYSQL_PASS=password + - MYSQL_PORT=3351 + runTests: + title: Integration tests + image: alpine + commands: + - printenv + - echo "Running tests" + environment: *my_common_envs # Same MYSQL_HOST, MYSQL_USER etc. +{% endhighlight %} + +Instead of repeating the same environment variables in both steps, we can create them once and then just reference them in the second step with the `*` character. + +You also define anchors at the top of the pipeline in the special `indicators` block: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' + +indicators: + - environment: &my_common_envs + - MYSQL_HOST=mysql + - MYSQL_USER=user + - MYSQL_PASS=password + - MYSQL_PORT=3351 + +steps: + preLoadDatabase: + title: Loading Data + image: alpine + commands: + - printenv + - echo "Loading DB" + environment: *my_common_envs # Same MYSQL_HOST, MYSQL_USER etc. + runTests: + title: Integration tests + image: alpine + commands: + - printenv + - echo "Running tests" + environment: *my_common_envs # Same MYSQL_HOST, MYSQL_USER etc. + +{% endhighlight %} + + +Finally. you also extend steps like below: + + `codefresh.yml` +{% highlight yaml %} +version: '1.0' +steps: + deploy_to_k8_staging: &my_basic_deployment + title: deploying to cluster + type: deploy + kind: kubernetes + cluster: myStagingCluster + namespace: sales + service: my-python-app + deploy_to_k8_prod: + <<: *my_basic_deployment + cluster: myProdCluster # only cluster differs, everything else is the same + +{% endhighlight %} + +Here we deploy to two kubernetes clusters. The first step defines the staging deployment. +For the second step, we extend the first one and only change the name of the cluster +to point to production. Everything else (i.e. namespace and service) are exactly the same. + + +## Related articles +[Steps in CI pipelines]({{site.baseurl}}/docs/pipelines/steps/) +[Variables in CI pipelines]({{site.baseurl}}/docs/pipelines/variables/) +[Advanced workflows]({{site.baseurl}}/docs/pipelines/advanced-workflows/) +[Creating pipelines]({{site.baseurl}}/docs/pipelines/pipelines/) +[YAML examples]({{site.baseurl}}/docs/example-catalog/examples/) + + + + + + + diff --git a/_docs/reference/behind-the-firewall.md b/_docs/reference/behind-the-firewall.md new file mode 100644 index 00000000..b01ba138 --- /dev/null +++ b/_docs/reference/behind-the-firewall.md @@ -0,0 +1,248 @@ +--- +title: "Runner installation behind firewalls" +description: "Run Codefresh Pipelines in your own secure infrastructure" +group: installation +redirect_from: + - /docs/enterprise/behind-the-firewall/ +toc: true + +--- + +As described in [installation options]({{site.baseurl}}/docs/installation/installation-options/) Codefresh offers CI/CD and GitOps installation environments, each with its own installation options. +This articles focuses on the CI/CD Hybrid installation option with the Codefresh Runner and its advantages. + +## Running Codefresh CI/CD in secure environments + +Codefresh CI/CD has an on-premises installation in which the Codefresh CI/CD platform is installed on the customer's premises. 
While +this solution is very effective as far as security is concerned, it places a lot of overhead on the customer, as all updates +and improvements done in the platform must also be transferred to the customer premises. + +Hybrid CI/CD places a Codefresh Runner within the customer premises, and the UI (and management platform) stays in the Codefresh SaaS. + +Here is the overall architecture: + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/architecture.png" + url="/images/administration/behind-the-firewall/architecture.png" + alt="Codefresh Hybrid CI/CD behind the firewall" + caption="Codefresh Hybrid CI/CD behind the firewall" + max-width="100%" + %} + +The advantages of this scenario are manifold. + +Regarding platform maintenance: + + 1. Codefresh is responsible for the heavy lifting of platform maintenance, instead of the customer. + 1. Updates to the UI, build engine, integrations, etc., happen automatically, without any customer involvement. + 1. Actual builds run on the customer premises under fully controlled conditions. + 1. The Codefresh Runner is fully automated. It handles volume claims and build scheduling on its own within the Kubernetes cluster in which it is placed. + +Regarding security of services: + + 1. Pipelines can run in behind-the-firewall clusters with internal services. + 1. Pipelines can use integrations (such as Docker registries) that are private and secure. + 1. Source code never leaves the customer premises. + +Regarding firewall security: + + 1. Uni-directional, outgoing communication between the Codefresh Runner and the Codefresh CI/CD platform. The Runner polls the Codefresh platform for jobs. + 1. Codefresh SaaS never connects to the customer network. No ports need to be open in the customer firewall for the runner to work. + 1. The Codefresh Runner is fully open source, so its code can be scrutinized by any stakeholder. + + + +## Using secure services in your CI pipelines + +After installing the [Codefresh Runner]({{site.baseurl}}/docs/installation/codefresh-runner/) on a private Kubernetes cluster in your infrastructure, all CI pipelines running in that cluster have access to all other internal services that are network reachable. + +You can easily create CI pipelines that: + + * Use databases internal to the company + * Run integration tests against services internal to the company + * Launch [compositions]({{site.baseurl}}/docs/pipelines/steps/composition/) that communicate with other secure services + * Upload and download artifacts from a private artifact repository (e.g., Nexus or Artifactory) + * Deploy to any other cluster accessible in the secure network + * Create infrastructure such as machines, load balancers, auto-scaling groups, etc. + + Any of these CI pipelines will work out of the box without extra configuration. In all cases, + all data stays within the private local network and does not exit the firewall. + + >Notice that [long-running compositions]({{site.baseurl}}/docs/pipelines/steps/composition/) (preview test environments) are not yet available via the Codefresh build runner. + + + +### Checking out code from a private GIT repository + +To check out code from your private Git repository, you first need to connect to Codefresh via [GIT integrations]({{site.baseurl}}/docs/integrations/git-providers/).
However, once you define your GIT provider as *on premise*, you +need to also mark it as *behind the firewall*: + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/behind-the-firewall-toggle.png" + url="/images/administration/behind-the-firewall/behind-the-firewall-toggle.png" + alt="Behind the firewall toggle" + caption="Behind the firewall toggle" + max-width="100%" + %} + +Once you do that, save your provider and make sure that it has the correct tags. The name you used for the git provider will also be used in the pipeline. You cannot "test the connection" because +the Codefresh SaaS doesn't have access to your on-premises GIT repository. + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/behind-the-firewall-tag.png" + url="/images/administration/behind-the-firewall/behind-the-firewall-tag.png" + alt="Behind the firewall tags" + caption="Behind the firewall tags" + max-width="100%" + %} + +To check out code, just use a [clone step]({{site.baseurl}}/docs/pipelines/steps/git-clone/) like any other clone operation. +The only thing to remember is that the GIT URL must be fully qualified. You need to [create a pipeline]({{site.baseurl}}/docs/pipelines/pipelines/#pipeline-creation-modes) on its own from the *Pipelines* section of the left sidebar (instead of adding a Git repository to Codefresh first). + + + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + main_clone: + type: git-clone + description: Step description + repo: https://github-internal.example.com/my-username/my-app + git: my-internal-git-provider + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: my-image + tag: '${{CF_BRANCH_TAG_NORMALIZED}}-${{CF_SHORT_REVISION}}' + dockerfile: Dockerfile +{% endraw %} +{% endhighlight %} + +Once you trigger the CI pipeline, the Codefresh builder communicates with your private GIT instance and checks out the code. + +>Note that currently there is a limitation on the location of the `codefresh.yml` file. Only the [inline mode]({{site.baseurl}}/docs/pipelines/pipelines/#writing-codefresh-yml-in-the-gui) is supported. Soon we will allow the loading of the pipeline from the Git repository itself. + +You can also use a [network proxy]({{site.baseurl}}/docs/pipelines/steps/git-clone/#using-git-behind-a-proxy) for the Git clone step. + +#### Adding triggers from private GIT repositories + + +In the previous section we saw how a CI pipeline can check out code from an internal Git repository. We also need to set up a trigger, +so that every time a commit or any other supported event occurs, the Codefresh CI pipeline is triggered automatically. + +If you have installed the [optional app-proxy]({{site.baseurl}}/docs/installation/codefresh-runner/#optional-installation-of-the-app-proxy), adding a trigger can be done exactly as in the SaaS version of Codefresh, using only the Codefresh UI. + +If you haven't installed the app-proxy, then adding a Git trigger is a two-step process: + +1. First we set up a webhook endpoint in Codefresh. +1. Then we create the webhook call on the side of the GIT provider. + +> To support triggers based on PR (Pull Request) events, it is mandatory to install `app-proxy`. + +For the Codefresh side, follow the usual instructions for creating a [basic git trigger]({{site.baseurl}}/docs/configure-ci-cd-pipeline/triggers/git-triggers/).
+ +Once you select your GIT provider, you need to manually enter your username and repository that you wish to trigger the build. + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/enter-repo-details.png" + url="/images/administration/behind-the-firewall/enter-repo-details.png" + alt="Entering repository details" + caption="Entering repository details" + max-width="60%" + %} + +All other details (git events, branch naming, monorepo pattern, etc.) are still the same as normal SAAS GIT providers. +Once that is done, Codefresh will show you the webhook endpoint along with a secret for triggering this pipeline. Note them down. + + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/codefresh-webhook.png" + url="/images/administration/behind-the-firewall/codefresh-webhook.png" + alt="Codefresh webhook details" + caption="Codefresh webhook details" + max-width="60%" + %} + +This concludes the setup on the Codefresh side. The final step is create a webhook call on the side of your GIT provider. +The instructions are different per GIT provider: + +* [GitHub webhooks](https://developer.github.com/webhooks/) +* [GitLab webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html) +* [Stash webhooks](https://confluence.atlassian.com/bitbucketserver/managing-webhooks-in-bitbucket-server-938025878.html) + +In all cases make sure that the payload is JSON, because this is what Codefresh expects. + +* For GitHub the events monitored should be `Pull requests` and `Pushes`. +* For GitLab the events monitored should be `Push events`,`Tag push events` and `Merge request events`. + +After the setup is finished, the Codefresh pipeline will be executed every time a git event happens. + +### Accessing an internal docker registry + +To access an internal registry just follow the instructions for [adding registries]({{site.baseurl}}/docs/docker-registries/external-docker-registries/) . Like GIT repositories +you need to mark the Docker registry as *Behind the firewall*. + +Once that is done, use the [push step]({{site.baseurl}}/docs/codefresh-yaml/steps/push/) as usual with the name you gave to the registry during the integration setup. + + +`YAML` +{% highlight yaml %} +{% raw %} +version: '1.0' +steps: + gitClone: + type: git-clone + description: Step description + repo: https://github-internal.example.com/my-username/my-app + git: my-internal-git-repo + BuildingDockerImage: + title: Building Docker Image + type: build + image_name: my-image + dockerfile: Dockerfile + PushingDockerImage: + title: Pushing a docker image + type: push + candidate: '${{BuildingDockerImage}}' + tag: '${{CF_BRANCH}}' + registry: my-internal-docker-registry +{% endraw %} +{% endhighlight %} + + +### Deploying to an internal Kubernetes cluster + +To connect a cluster that is behind the firewall follow the [connecting cluster guide]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/), paying attention to the following two points: + +1. Your cluster should be added as a [Custom provider]({{site.baseurl}}/docs/deploy-to-kubernetes/add-kubernetes-cluster/#adding-any-other-cluster-type-not-dependent-on-any-provider) +1. You need to mark the cluster as internal by using the toggle switch. 
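+Once the cluster is connected and marked as internal (the toggle is shown in the screenshot below), deploying from a pipeline works the same way as for any other cluster. The following is only a sketch of the dedicated deploy step; the cluster, namespace, and service names are illustrative:
+
+{% highlight yaml %}
+{% raw %}
+version: '1.0'
+steps:
+  deploy_to_internal_cluster:
+    title: Deploying to the internal cluster
+    type: deploy
+    kind: kubernetes
+    # Name of the internal cluster exactly as it was added in the Codefresh integration
+    cluster: my-internal-cluster
+    namespace: default
+    service: my-app
+{% endraw %}
+{% endhighlight %}
+
+Alternatively, you can run custom `kubectl` commands from a freestyle step, as mentioned below.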
+ + + + +{% include image.html + lightbox="true" + file="/images/administration/behind-the-firewall/cluster-behind-firewall.png" + url="/images/administration/behind-the-firewall/cluster-behind-firewall.png" + alt="Marking a Kubernetes cluster as internal" + caption="Marking a Kubernetes cluster as internal" + max-width="60%" + %} + +The cluster where the runner works on should have network connectivity with the cluster you wish to deploy to. + +>Notice that the service account used in the cluster configuration is completely independent from the privileges granted to the Codefresh build runner. The privileges needed by the runner are only used to launch Codefresh pipelines within your cluster. The Service account used in the "custom provider" setting should have the needed privileges for deployment. + +Once your cluster is connected you can use any of the familiar deployment methods such as the [dedicated deploy step]({{site.baseurl}}/docs/deploy-to-kubernetes/deployment-options-to-kubernetes/) or [custom kubectl commands]({{site.baseurl}}/docs/deploy-to-kubernetes/custom-kubectl-commands/). + +## Related articles +[Codefresh installation options]({{site.baseurl}}/docs/installation/installation-options/) +[Google marketplace integration]({{site.baseurl}}/docs/integrations/ci-integrations/google-marketplace/) +[Managing your Kubernetes cluster]({{site.baseurl}}/docs/deployments/kubernetes/manage-kubernetes/) diff --git a/_docs/reference/git-tokens.md b/_docs/reference/git-tokens.md deleted file mode 100644 index 25a8994a..00000000 --- a/_docs/reference/git-tokens.md +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: "Git tokens" -description: "" -group: reference -redirect_from: - - /docs/administration/git-tokens/ -toc: true ---- - - - -Codefresh requires two types of Git tokens for authentication: -* Git runtime token for runtime installation - Used by: - * Argo CD clone repositories and pull changes to sync the desired state in Git to the live state on the cluster. - * Argo Events to create webhooks in Git repositories for Event Sources in Delivery Pipelines - - The Git runtime token is runtime-specific but not user-specific. - - -* Git user token, a user-specific personal access token for each runtime, unique to every user - Unique to every user, the Git user token is used to authenticate the user for client-based actions, such as Git clone and push operations on specific repositories. - Git user token requirements translate to permission scopes which differ for the different Git providers. - - After installation, you need to authorize Git access for every provisioned runtime either through OAuth2 or through a personal access token from your Git provider. - Every user can view the list of runtimes and tokens assigned to each runtime in [User Settings](https://g.codefresh.io/2.0/user-settings){:target="\_blank"}. Codefresh flags and notifies you of invalid, revoked, or expired tokens. - - - - -### Git runtime token scopes -The Git runtime token is mandatory for runtime installation. - -{::nomarkdown} -
                                                        -{:/} - -#### GitHub and GitHub Enterprise runtime token scopes - -* `repo` -* `admin:repo_hook` - -{::nomarkdown} -
                                                        -{:/} - -#### GitLab Cloud and GitLab Server runtime token scopes - -* `api` -* `read_repository` - -{::nomarkdown} -
                                                        -{:/} - -#### Bitbucket Cloud and Bitbucket Server runtime token scopes - -* **Account**: `Read` -* **Workspace membership**: `Read` -* **Webhooks**: `Read and write` -* **Repositories**: `Write`, `Admin` - -{::nomarkdown} -

                                                        -{:/} - -### Git personal tokens -The Git personal token is a user-specific personal access token per provisioned runtime. Unique to each user, it may be required after to authenticate Git-based actions per runtime in Codefresh, based on how your admin has set up authentication for Git providers. - -> If you have access to multiple runtimes, you can use the same personal access token for all the runtimes. - You must configure the token for each runtime. - -{::nomarkdown} -
                                                        -{:/} - -#### GitHub and GitHub Enterprise personal user token scopes -* `repo` - - -{::nomarkdown} -
                                                        -{:/} - -#### GitLab Cloud and GitLab Server personal user token scopes - -* `write_repository` (includes `read-repository`) -* `api-read` - -{::nomarkdown} -
                                                        -{:/} - -#### Bitbucket Cloud and Server personal user token scopes - -* **Account**: `Read` -* **Workspace membership**: `Read` -* **Repositories**: `Write`, `Admin` - -### Related articles -[User settings]({{site.baseurl}}/docs/administration/user-settings/) \ No newline at end of file diff --git a/_docs/reference/secrets.md b/_docs/reference/secrets.md deleted file mode 100644 index b306c751..00000000 --- a/_docs/reference/secrets.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -title: "Secrets" -description: "Learn how Codefresh stores secrets" -group: reference -toc: true ---- - - -Codefresh provides out-of-the-box management for secrets, generally to store secrets for third-party integrations. - -For secure secret storage, every Codefresh runtime uses the [Bitnami Sealed Secrets controller](https://github.com/bitnami-labs/sealed-secrets){:target="_blank"} behind the scenes. -This controller is installed as part of the runtime and automatically managed by Codefresh. - -### How Sealed Secrets work - -Sealed Secrets are based on [public/private key encryption](https://en.wikipedia.org/wiki/Public-key_cryptography){:target="_blank"}. When the controller is installed, it gets a public and private key. The private key stays within the cluster. The public key can be given anywhere to encrypt secrets. - -Any kind of secret can be encrypted with the public key (also via the `kubeseal` executable), and then passed to the cluster for decryption when needed. - -For GitOps applications, encryption for secrets is critical, as it means that you can commit any kind of secret in Git as long as it is encrypted. - -Here's the event flow for Sealed Secrets: - -1. A secret is encrypted by an operator and/or developer with the `kubeseal` executable. -1. A custom Kubernetes resource called SealedSecret is created. -1. The secret is committed in Git. -1. During application deployment, the Codefresh runtime applies this secret to the cluster. -1. The Sealed Secret controller identifies the Sealed Secret object and decrypts it using the private key of the cluster. -1. The Sealed Secret is converted to a [standard Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) {:target="_blank"} inside the cluster. -1. It is then passed to the application like any other secret, as a mounted file or environment variable. -1. The application uses the secret in its decrypted form. - -For more details, you can read our [blog post for sealed secrets](https://codefresh.io/blog/handle-secrets-like-pro-using-gitops/){:target="_blank"}. - -### Configuring the Sealed Secrets controller - -The Sealed Secret controller is fully managed by the Codefresh runtime, and secret encryption and decryption are fully automated. - -> **Warning!** DO NOT tamper with the controller or its private/public keys in any way. - -The applications you deploy with Codefresh should also have no knowledge of the controller. All secrets that you need in your own applications should be accessed using the standard Kubernetes methods. 
- -### What to read next -[Set up a hosted (Hosted GitOps environment]({{site.baseurl}}/docs/runtime/hosted-runtime) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation) -[Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview) - diff --git a/_docs/reference/shared-configuration.md b/_docs/reference/shared-configuration.md deleted file mode 100644 index 5153b962..00000000 --- a/_docs/reference/shared-configuration.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: "Shared configuration repo" -description: "" -group: reference -toc: true ---- - - -A Codefresh account with a hosted or a hybrid runtime can store configuration manifests for account-level resources in a Git repository. This repository can be shared with other runtimes in the same account, avoiding the need to create and maintain configuration manifests for every runtime. - -* Hosted runtimes - As part of the setup for a hosted runtime, Codefresh creates the shared configuration repository in the selected organization, together with the default Git Source repo. See [Connect Git provider]({{site.baseurl}}/docs/runtime/hosted-runtime/#2-connect-git-provider) in Hosted GitOps setup. - -* Hybrid runtimes - When you install the first hybrid runtime for an account, you can manually define the shared configuration repo through the `--shared-config-repo` flag. Or, you can allow Codefresh to automatically create the shared configuration repo in the runtime installation repo, in `shared-config` root. See [Installing hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/). - - For older versions of hybrid runtimes, upgrade the runtime to create the shared configuration repo, as described in [Upgrading hybrid runtimes](#upgrading-hybrid-runtimes) later in this article. - -> Currently, Codefresh supports a single shared configuration repo per account. - - -### Shared configuration repo structure -Below is a representation of the structure of the repository with the shared configuration. -See a [sample repo](https://github.dev/noam-codefresh/shared-gs){:target="\_blank"}. - -``` -. -├── resources <───────────────────┐ -│   ├── all-runtimes-all-clusters │ -│   │   ├── cm-all.yaml │ -│   │   └── subfolder │ -│   │   └── manifest2.yaml │ -│   ├── control-planes │ -│   │ └── manifest3.yaml │ -│   ├── runtimes │ -│   │ ├── runtime1 │ -│   │ │ └── manifest4.yaml │ -│   │ └── runtime2 │ -│   │ └── manifest5.yaml │ -│   └── manifest6.yaml │ -└── runtimes │ - ├── production │ # referenced by /apps/runtime1/config_dir.json - │   ├── in-cluster.yaml ─┤ # manage `include` field to decide which dirs/files to sync to cluster - │   └── remote-cluster.yaml ─┤ # manage `include` field to decide which dirs/files to sync to cluster - └── staging │ # referenced by /apps/runtime2/config_dir.json - └── in-cluster.yaml ─┘ # manage `include` field to decide which dirs/files to sync to cluster -``` -{::nomarkdown} -
                                                        -{:/} - -#### `resources` directory - -The `resources` directory holds the resources shared by all clusters managed by the runtime: - - * `all-runtimes-all-clusters`: Every resource manifest in this directory is applied to all the runtimes in the account, and to all the clusters managed by those runtimes. - * `control-planes`: Optional. Valid for hosted runtimes only. When defined, every resource manifest in this directory is applied to each hosted runtime’s `in-cluster`. - * `runtimes/`: Optional. Runtime-specific subdirectory. Every resource manifest in a runtime-specific subdirectory is applied to only that runtime. `manifest4.yaml` in the above example is applied only to `runtime1`. - -{::nomarkdown} -
                                                        -{:/} - -#### `runtimes` directory -Includes subdirectories specific to each runtime installed in the cluster, always with `in-cluster.yaml`, and optionally, application manifests for other clusters. - -**Example application manifest for in-cluster.yaml** - -```yaml -apiVersion: argoproj.io/v1alpha1 -kind: Application -metadata: - labels: - codefresh.io/entity: internal-config - codefresh.io/internal: 'true' - name: in-cluster -spec: - project: default - source: - repoURL: - path: resources # or shared-config/resources - directory: - include: '{all-runtimes-all-clusters/*.yaml,all-runtimes-all-clusters/**/*.yaml,runtimes//*.yaml,runtimes//**/*.yaml,control-planes/*.yaml,control-planes/**/*.yaml}' - recurse: true - destination: - namespace: - server: https://kubernetes.default.svc - syncPolicy: - automated: - allowEmpty: true - prune: true - selfHeal: true - syncOptions: - - allowEmpty=true -``` - - -### Git Source application per runtime -In addition to the application manifests for the runtimes in the shared configuration repository, every runtime has a Git Source application that references `runtimes/`. - -This Git Source application creates an application manifest with the `` for every cluster managed by the runtime. The `include` field in the `` application manifest determines which subdirectories in the `resources` directory are synced to the target cluster. - - -### Adding resources -When creating a new resource, such as a new integration for example in the Codefresh UI, you can define the runtimes and clusters to which to apply that resource. The app-proxy saves the resource in the correct location and updates the relevant Argo CD Applications to include it. - -### Upgrading hybrid runtimes -Older hybrid runtimes that do not have the shared configuration repository must be upgraded to the latest version. -You have two options to define the shared configuration repository during upgrade: -* Upgrade the hybrid runtime, and let the Codefresh app-proxy automatically create the shared configuration repo automatically. -* Manually define the shared configuration repository, by adding the `--shared-config-repo` flag in the runtime upgrade command. - ->If the shared configuration repo is not created for an account, Codefresh creates it in the installation repo, in `shared-config` root. - -If the hybrid runtime being upgraded has managed clusters, once the shared configuration repo is created for the account either automatically or manually on upgrade, all clusters are migrated to the same repo when app-proxy is initialized. An Argoproj application manifest is committed to the repo for each cluster managed by the runtime. - -See [(Hybrid) Upgrade provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/#hybrid-upgrade-provisioned-runtimes). - - - - - - - diff --git a/_docs/reporting/dora-metrics.md b/_docs/reporting/dora-metrics.md deleted file mode 100644 index 229b2d7b..00000000 --- a/_docs/reporting/dora-metrics.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: "DORA metrics" -description: "Get insights into your deployments" -group: reporting -toc: true ---- - -DevOps is a collaboration paradigm that is sometimes mistaken for being too abstract or too generic. 
In an effort to quantify the benefits of adopting DevOps, [Dora Research](https://www.devops-research.com/research.html#capabilities){:target="\_blank"} (acquired by Google in 2018), has introduced four key metrics that define specific goals for improving the software lifecycle in companies interested in adopting DevOps. - -DORA measures these metrics: - -* Deployment Frequency: How often an organization successfully releases to production -* Lead Time for Changes: The length of time for a commit to be deployed into production -* Change Failure Rate: The percentage of deployments causing a failure in production -* Time to Restore Service: The length of time for an organization to recover from a failure in production - -[Read more on DORA](https://cloud.google.com/blog/products/devops-sre/using-the-four-keys-to-measure-your-devops-performance){:target="\_blank"}. - -### DORA metrics in Codefresh - -Monitoring DORA metrics can help identify delivery issues in your organization by detecting bottlenecks among teams, and help to optimize your workflows, at technical or organizational levels. -Codefresh offers support for DORA metrics out of the box. - -* In the Codefresh UI, go to [DORA metrics](https://g.codefresh.io/2.0/dora-dashboard/dora){:target="\_blank"}. - -{% include -image.html -lightbox="true" -file="/images/reporting/dora-metrics.png" -url="/images/reporting/dora-metrics.png" -alt="DORA metrics report" -caption="DORA metrics report" -max-width="100%" -%} - -### Filters - -Use filters to define the exact subset of applications you are interested in. All filters support auto-complete and multi-select. -More than one option within the same filter type has an OR relationship. More than one filter type when defined share an AND relationship. - -* Runtimes: Show metrics for applications from selected runtimes -* Clusters: Show metrics for applications deployed to selected clusters -* Applications: Show metrics for selected applications. -* Time: Show metrics from application for a specific time period - -> When no filters are defined, all metrics are shown for the last 90 days. - -### Metrics for favorite applications -If you have [starred applications as favorites]({{site.baseurl}}/docs/deployment/applications-dashboard/#applications-dashboard-information) in the Applications dashboard, clicking {::nomarkdown}{:/} in DORA metrics, displays metrics only for those applications. - - -### Metric totals -As the title indicates, the Totals bar shows the total numbers, based on the filters defined, or for the last 90 days, if there are no filters: - -* Deployments -* Rollbacks -* Commits/Pull Requests -* Failure Rate: The number of failed deployments divided by the total number of deployments - -### Metric graphs -The metric graphs show performance for the DORA metrics, again based on the filters defined, or for the last 90 days, if there are no filters. - -In addition, you can select the granularity for each graph: - -* Daily -* Weekly -* Monthly - - - -**Deployment Frequency** - The frequency of deployments of any kind, successful or failed. Deployment is considered an Argo CD sync where there was a change. The X-axis charts the time based on the granularity, and the Y-axis charts the number of deployments. The number shown on the top right is the average deployment frequency based on granularity. - -**Change failure rate** - The failure or rollback rate in percentage for deployments. Derived by dividing the failed/rollback deployments by the total number of deployments. 
Failed deployments are those Argo CD deployments that lead to a sync state of Degraded. The X-axis charts the time based on the granularity, and the Y-axis charts the failure rate. The number shown on the top right is the average failure rate based on granularity, and therefore may not be equal to the Total Failure Rate. - -**Lead Time for Changes** - The average number of days from the first commit for a pull request until the deployment date for the same pull request. The X-axis charts the time based on the granularity, and the Y-axis charts the time in minutes until the deployment. The number shown on the top right is the average number of days for a commit to reach production. - -**Time to Restore Service** - The average number of hours taken for the status to return to Healthy after changing to Degraded or Unhealthy. The X-axis charts the time based on the granularity, and the Y-axis charts the time in hours. The number shown on the top right is the average number of hours between the previous deployment and rollback for the same application. - -## What to read next -[Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture/) -[Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/) - diff --git a/_docs/reporting/home-dashboard.md b/_docs/reporting/home-dashboard.md deleted file mode 100644 index c3a402c0..00000000 --- a/_docs/reporting/home-dashboard.md +++ /dev/null @@ -1,142 +0,0 @@ ---- -title: "Home dashboard" -description: "" -group: reporting -toc: true ---- - -Get a global picture of runtimes, managed clusters, deployments, and pipelines in the Home dashboard. The Home dashboard is displayed when you log in to Codefresh, providing system-wide visualization in real-time. - -Global filters allow you to narrow the scope of the data, and drill down into specific entities for more details. - - {% include - image.html - lightbox="true" - file="/images/reporting/home-dashboard.png" - url="/images/reporting/home-dashboard.png" - alt="Home dashboard: Global enterprise analytics" - caption="Home dashboard: Global enterprise analytics" - max-width="70%" - %} - -### Global filters -Filter the view in the Home dashboard by runtimes and date range. - -{% include - image.html - lightbox="true" - file="/images/reporting/global-filters.png" - url="/images/reporting/global-filters.png" - alt="Home dashboard: Global filters" - caption="Home dashboard: Global filters" - max-width="60%" - %} - -### Runtimes and Managed Clusters - -Identify the health of the runtimes and managed clusters in your enterprise. -Health status is displayed for both hosted (if you have Hosted GitOps), and hybrid runtimes. - -Managed clusters are external clusters registered to runtimes to which you deploy applications and GitOps-managed resources. - - {% include - image.html - lightbox="true" - file="/images/reporting/runtimes-clusters-widgets.png" - url="/images/reporting/runtimes-clusters-widgets.png" - alt="Runtimes and Managed Clusters in the Home dashboard" - caption="Runtimes and Managed Clusters in the Home dashboard" - max-width="80%" - %} - -{: .table .table-bordered .table-hover} -| Item | Description | -| ------------------------| ---------------- | -|**Runtimes** | Identify failed runtimes.| -|**Managed Clusters** |{::nomarkdown}
                                                        • Status: One of the following:
                                                          • Connected: Argo CD is able to connect and successfully deploy resources to the cluster.
                                                          • Failed: Argo CD is unable to connect to the cluster because of authentication, networking, or other issues.
                                                          • Unknown: Argo CD has no information on the cluster as there are no resources deployed to the managed cluster.
                                                        • View: Click to go to the Runtimes page.
                                                          To see the runtime's managed clusters, select the runtime.
{:/}| - - -### Deployments - -Identify trends for deployments. - - {% include - image.html - lightbox="true" - file="/images/reporting/deployments-widget.png" - url="/images/reporting/deployments-widget.png" - alt="Deployments in the Home dashboard" - caption="Deployments in the Home dashboard" - max-width="70%" - %} - -{: .table .table-bordered .table-hover} - | Item | Description | -| ------------------------| ---------------- | -|**Daily/Weekly/Monthly** | Granularity for deployment views that affects the average number of deployments and the comparison to the reference period.| -|**Number and Comparison Average** | The number on the top right is the number of successful/failed/rollback deployments for the selected granularity. The percentage is the comparison to the reference period, also for the selected granularity. | -|**Successful** | The number of successful deployments per day, week, or month according to the selected granularity. | -|**Failed Deployments** | The number of failed deployments per day, week, or month according to the selected granularity. | -|**Rollbacks** | The number of rollbacks per day, week, or month according to the selected granularity. | - - - -### Applications - -Displays up to five of the most active applications and their current deployment status. - -{% include - image.html - lightbox="true" - file="/images/reporting/applications-widget.png" - url="/images/reporting/applications-widget.png" - alt="Applications in the Home dashboard" - caption="Applications in the Home dashboard" - max-width="70%" - %} - -{: .table .table-bordered .table-hover} - -| Item | Description | -| ------------------------| ---------------- | -|**Filter** | Filter applications by the cluster on which they are deployed. | -|**View** | Click to go to the Applications dashboard. | -|**Application Name** | The name of the application, and the names of the runtime and cluster on which it is deployed. Click the name to drill down into the application in the Applications dashboard. | -|**Health status** | The health status of the application, and can be either:{::nomarkdown}
                                                        • Healthy (green): The application is running on the cluster.
                                                        • Degraded (red): The application failed to run.
                                                        • Rollback (yellow): There was a rollback to the previously deployed version.
                                                        To see the breakdown by health status, mouse over the chart.
The number at the end of the bar is the total number of deployments for the application, with the overall decrease or increase compared to the reference period. {:/} | - - - -### Delivery Pipelines - -> Delivery Pipeline data is shown for hybrid environments. - -Displays all active pipelines for the selected date range, providing insights into trends for pipelines. Active pipelines are those with at least one active or completed workflow. -Analytics are derived by comparing the selected date range to the corresponding reference period. If your date range is the last seven days, the reference period is the seven days that precede the date range. - -{% include - image.html - lightbox="true" - file="/images/reporting/delivery-pipeline-widget.png" - url="/images/reporting/delivery-pipeline-widget.png" - alt="Delivery Pipelines in the Home dashboard" - caption="Delivery Pipelines in the Home dashboard" - max-width="80%" - %} - -{: .table .table-bordered .table-hover} - -| Item | Description | -| ------------------------| ---------------- | -|**Pipelines** | The number prefixed to the pipeline name indicates the change in position of the pipeline compared to the reference period. To drill down into a specific pipeline, click the pipeline.| -|**Filter** | The filters available to focus on the pipelines of interest:{::nomarkdown}
                                                        • Status:
                                                          • Succeeded: Pipelines with workflows completed successfully.
                                                          • Failed: Pipelines with workflows that failed.
                                                          • Error: Pipelines with workflows that resulted in errors.
                                                        • Repository: The Git repository or repositories tracked, with the events that triggered or ran the pipelines.
                                                        • Event Type: The Git or Calendar event or events by which to view pipelines. If you select Git push, only those pipelines configured to be run on Git push are displayed.
                                                        • Initiator: The user who made the commit that triggered the event and caused the pipeline to run.
                                                        {:/} | -|**View** | Click to go to the Delivery Pipelines dashboard. | -|**KPI Averages** | KPI averages for: {::nomarkdown}
• Success Rate: The average percentage of successful executions.
                                                        • Average Duration: The average length of time to complete execution, in mm:ss.
• Executions: The average number of times the pipeline was triggered.
                                                        • Committers: The number of users whose commits on the repository or repositories triggered the pipelines. User count is aggregated per user, so multiple commits from the same user are counted as a single commit.
                                                        To see detailed day-to-day values, select a line chart.{:/}| -|**Most Active Delivery Pipelines** | Up to ten pipelines with the highest number of executions. The same KPIs are displayed, and compared to those in the reference period. | -|**Longest Delivery Pipelines** | Up to ten pipelines with the longest duration. The same KPIs are displayed, and compared to those in the reference period. | - -### Related articles -[DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/) -[Monitoring applications]({{site.baseurl}}/docs/deployment/applications-dashboard/) -[Images in Codefresh]({{site.baseurl}}/docs/deployment/images/) - - diff --git a/_docs/runtime/download-runtime-logs.md b/_docs/runtime/download-runtime-logs.md deleted file mode 100644 index ca6cf8ff..00000000 --- a/_docs/runtime/download-runtime-logs.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: "View/download runtime logs" -description: "" -group: runtime -toc: true ---- - -Logs are available for completed runtimes, both for the runtime and for individual runtime components. Download runtime log files for offline viewing and analysis, or view online logs for a runtime component, and download if needed for offline analysis. Online logs support free-text search, search-result navigation, and line-warp for enhanced readability. - -Log files include events from the date of the application launch, with the newest events listed first. - - -### Download logs for runtimes -Download the log file for a runtime. The runtime log is downloaded as a `.tar.gz` file, which contains the individual log files for each runtime component. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime for which to download logs. -1. From the list of **Additional Actions**, select **Download All Logs**. - The log file is downloaded to the Downloads folder or the folder designated for downloads, with the filename, `.tar.gz`. For example, `codefreshv2-production2.tar.gz`. - - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-download-all.png" - url="/images/runtime/runtime-logs-download-all.png" - alt="Download logs for selected runtime" - caption="Download logs for selected runtime" - max-width="40%" -%} - - -{:start="4"} -1. To view the log files of the individual components, unzip the file. - Here is an example of the folder with the individual logs. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-folder-view.png" - url="/images/runtime/runtime-logs-folder-view.png" - alt="Individual log files in folder" - caption="Individual log files in folder" - max-width="50%" -%} - -{:start="5"} -1. Open a log file with the text editor of your choice. - - -### View/download logs for runtime components -View online logs for any runtime component, and if needed, download the log file for offline viewing and analysis. - -Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events from the application launch to the date and time of download. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime. -1. Select the runtime component and then select **View Logs**. 
- - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-view-component.png" - url="/images/runtime/runtime-logs-view-component.png" - alt="View log option for individual runtime component" - caption="View log option for individual runtime component" - max-width="40%" -%} - - -{:start="4"} -1. Do the following: - * Search by free-text for any string, and click the next and previous buttons to navigate between the search results. - * To switch on line-wrap for readability, click **Wrap**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-screen-view.png" - url="/images/runtime/runtime-logs-screen-view.png" - alt="Runtime component log example" - caption="Runtime component log example" - max-width="50%" -%} - -{:start="5"} -1. To download the log, click **Download**. - The file is downloaded as `.log`. - -### Related information -[Manage Git Sources]({{site.baseurl}}/docs/runtime/git-sources/#viewdownload-logs-for-a-git-source) \ No newline at end of file diff --git a/_docs/runtime/installation-options.md b/_docs/runtime/installation-options.md deleted file mode 100644 index e75e2058..00000000 --- a/_docs/runtime/installation-options.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: "Installation environments" -description: "" -group: runtime -toc: true ---- - -Codefresh supports two installation environments: - - -* **Hosted** environments (Beta), with Argo CD installed in the Codefresh cluster. - The runtime is installed and provisioned in a Codefresh cluster, and managed by Codefresh. - Hosted enviroments are full-cloud environments, where all updates and improvements are managed by Codefresh, with zero-maintenance overhead for you as the customer. Currently, you can add one hosted runtime per account. - For the architecture illustration, see [Hosted runtime architecture]({{site.baseurl}}/docs/getting-started/architecture/#hosted-runtime-architecture). - - -{% include - image.html - lightbox="true" - file="/images/runtime/intro-hosted-hosted-initial-view.png" - url="/images/runtime/intro-hosted-hosted-initial-view.png" - alt="Hosted runtime setup" - caption="Hosted runtime setup" - max-width="80%" -%} - - For more information on how to set up the hosted environment, including provisioning hosted runtimes, see [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -* **Hybrid** environments, with Argo CD installed in the customer's cluster. - The runtime is installed in the customer's cluster, and managed by the customer. - Hybrid environments are optimal for organizations that want to manage CI/CD operations within their premises, or have other security constraints. Hybrid installations strike the perfect balance between security, flexibility, and ease of use. Codefresh maintains and manages most aspects of the platform, apart from installing and upgrading runtimes which are managed by the customer. - - -{% include - image.html - lightbox="true" - file="/images/runtime/runtime-list-view.png" - url="/images/runtime/runtime-list-view.png" - alt="Runtime List View" - caption="Runtime List View" - max-width="70%" -%} - - For more information on hybrid environments, see [Hybrid runtime requirements]({{site.baseurl}}/docs/runtime/requirements/) and [Installling hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/). 
- - - -#### Git provider repos -Codefresh Runtime creates three repositories in your organization's Git provider account: - -* Codefresh runtime installation repository -* Codefresh Git Sources -* Codefresh shared configuration repository - - - -### Hosted vs.Hybrid environments - -The table below highlights the main differences between hosted and hybrid environments. - -{: .table .table-bordered .table-hover} -| Functionality |Feature | Hosted | Hybrid | -| -------------- | -------------- |--------------- | --------------- | -| Runtime | Installation | Provisioned by Codefresh | Provisioned by customer | -| | Runtime cluster | Managed by Codefresh | Managed by customer | -| | Number per account | One runtime | Multiple runtimes | -| | External cluster | Managed by customer | Managed by customer | -| | Upgrade | Managed by Codefresh | Managed by customer | -| | Uninstall | Managed by customer | Managed by customer | -| Argo CD | | Codefresh cluster | Customer cluster | -| CI Ops | Delivery Pipelines |Not supported | Supported | -| |Workflows | Not supported | Supported | -| |Workflow Templates | Not supported | Supported | -| CD Ops |Applications | Supported | Supported | -| |Image enrichment | Supported | Supported | -| | Rollouts | Supported | Supported | -|Integrations | | Supported | Supported | -|Dashboards |Home Analytics | Hosted runtime and deployments|Runtimes, deployments, Delivery Pipelines | -| |DORA metrics | Supported |Supported | -| |Applications | Supported |Supported | - -### Related articles -[Architecture]({{site.baseurl}}/docs/getting-started/architecture/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration) - diff --git a/_docs/runtime/installation.md b/_docs/runtime/installation.md deleted file mode 100644 index 44012210..00000000 --- a/_docs/runtime/installation.md +++ /dev/null @@ -1,535 +0,0 @@ ---- -title: "Install hybrid runtimes" -description: "" -group: runtime -toc: true ---- - -If you have a hybrid environment, you can provision one or more hybrid runtimes in your Codefresh account. - -> If you have Hosted GitOps, to provision a hosted runtime, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -**Git providers and runtimes** -Your Codefresh account is always linked to a specific Git provider. This is the Git provider you select on installing the first runtime, either hybrid or hosted, in your Codefresh account. All the hybrid runtimes you install in the same account use the same Git provider. -If Bitbucker Server is your Git provider, you must also select the specific server instance to associate with the runtime. - ->To change the Git provider for your Codefresh account after installation, contact Codefresh support. - - -**Hybrid runtime** - The hybrid runtime comprises Argo CD components and Codefresh-specific components. The Argo CD components are derived from a fork of the Argo ecosystem, and do not correspond to the open-source versions available. - -There are two parts to installing a hybrid runtime: - -1. Installing the Codefresh CLI -2. Installing the hybrid runtime from the CLI, either through the CLI wizard or via silent installation through the installation flags. - The hybrid runtime is installed in a specific namespace on your cluster. 
You can install more runtimes on different clusters in your deployment. - Every hybrid runtime installation makes commits to three Git repos: - * Runtime install repo: The installation repo that manages the hybrid runtime itself with Argo CD. If the repo URL does not exist, it is automatically created during runtime installation. - * Git Source repo: Created automatically during runtime installation. The repo where you store manifests for pipelines and applications. See [Git Sources]({{site.baseurl}}/docs/runtime/git-sources). - * Shared configuration repo: Created for the first runtime in a user account. The repo stores configuration manifests for account-level resources and is shared with other runtimes in the same account. See [Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration). - - -See also [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture). - -{::nomarkdown} -
                                                        -{:/} - -### Hybrid runtime installation flags -This section describes the required and optional flags to install a hybrid runtime. -For documentation purposes, the flags are grouped into: -* Runtime flags, relating to runtime, cluster, and namespace requirements -* Ingress controller flags, relating to ingress controller requirements -* Git provider flags -* Codefresh resource flags - -{::nomarkdown} -
                                                        -{:/} - -#### Runtime flags - -**Runtime name** -Required. -The runtime name must start with a lower-case character, and can include up to 62 lower-case characters and numbers. -* CLI wizard: Add when prompted. -* Silent install: Add the `--runtime` flag and define the runtime name. - -**Namespace resource labels** -Optional. -The label of the namespace resource to which you are installing the hybrid runtime. Labels are required to identify the networks that need access during installation, as is the case when using services meshes such as Istio for example. - -* CLI wizard and Silent install: Add the `--namespace-labels` flag, and define the labels in `key=value` format. Separate multiple labels with `commas`. - -**Kube context** -Required. -The cluster defined as the default for `kubectl`. If you have more than one Kube context, the current context is selected by default. - -* CLI wizard: Select the Kube context from the list displayed. -* Silent install: Explicitly specify the Kube context with the `--context` flag. - -**Shared configuration repository** -The Git repository per runtime account with shared configuration manifests. -* CLI wizard and Silent install: Add the `--shared-config-repo` flag and define the path to the shared repo. - -{::nomarkdown} -
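{:/}

As a minimal sketch, a silent install that sets the runtime flags described above might look like the following; the runtime name, kube context, labels, and repository values are placeholders, and the `--repo` and `--git-token` flags described later in this article are still required for a real installation:

```shell
# Illustrative example only -- replace every value with your own.
cf runtime install \
  --runtime my-runtime \
  --context my-kube-context \
  --namespace-labels istio-injection=enabled,team=platform \
  --shared-config-repo https://github.com/my-org/codefresh-shared-config.git \
  --repo https://github.com/my-org/codefresh-runtime.git \
  --git-token <git-runtime-token> \
  --silent
```

{::nomarkdown}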
                                                        -{:/} - -#### Ingress-less flags -These flags are required to install the runtime without an ingress controller. - -**Access mode** -Required. - -The access mode for ingress-less runtimes, the tunnel mode. - - -* CLI wizard and Silent install: Add the flag, `--access-mode`, and define `tunnel` as the value. - - -**IP allowlist** - -Optional. - -The allowed list of IPs from which to forward requests to the internal customer cluster for ingress-less runtime installations. The allowlist can include IPv4 and IPv6 addresses, with/without subnet and subnet masks. Multiple IPs must be separated by commas. - -When omitted, all incoming requests are authenticated regardless of the IPs from which they originated. - -* CLI wizard and Silent install: Add the `--ips-allow-list` flag, followed by the IP address, or list of comma-separated IPs to define more than one. For example, `--ips-allow-list 77.126.94.70/16,192.168.0.0` - -{::nomarkdown} -
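{:/}

As a sketch, an ingress-less (tunnel-based) silent install combining these flags might look like this; the repository URL and token are placeholders, and the IP list simply reuses the example values above:

```shell
# Illustrative tunnel-mode install without an ingress controller.
cf runtime install \
  --runtime my-runtime \
  --access-mode tunnel \
  --ips-allow-list 77.126.94.70/16,192.168.0.0 \
  --repo https://github.com/my-org/codefresh-runtime.git \
  --git-token <git-runtime-token> \
  --silent
```

{::nomarkdown}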
                                                        -{:/} - -#### Ingress controller flags - - -**Skip ingress** -Required, if you are using an unsupported ingress controller. -For unsupported ingress controllers, bypass installing ingress resources with the `--skip-ingress` flag. -In this case, after completing the installation, manually configure the cluster's routing service, and create and register Git integrations. See the last step in [Install the hybrid runtime](#install-the-hybrid-runtime). - -**Ingress class** -Required. - -* CLI wizard: Select the ingress class for runtime installation from the list displayed. -* Silent install: Explicitly specify the ingress class through the `--ingress-class` flag. Otherwise, runtime installation fails. - -**Ingress host** -Required. -The IP address or host name of the ingress controller component. - -* CLI wizard: Automatically selects and displays the host, either from the cluster or the ingress controller associated with the **Ingress class**. -* Silent install: Add the `--ingress-host` flag. If a value is not provided, takes the host from the ingress controller associated with the **Ingress class**. - > Important: For AWS ALB, the ingress host is created post-installation. However, when prompted, add the domain name you will create in `Route 53` as the ingress host. - -**Insecure ingress hosts** -TLS certificates for the ingress host: -If the ingress host does not have a valid TLS certificate, you can continue with the installation in insecure mode, which disables certificate validation. - -* CLI wizard: Automatically detects and prompts you to confirm continuing the installation in insecure mode. -* Silent install: To continue with the installation in insecure mode, add the `--insecure-ingress-host` flag. - -**Internal ingress host** -Optional. -Enforce separation between internal (app-proxy) and external (webhook) communication by adding an internal ingress host for the app-proxy service in the internal network. -For both CLI wizard and Silent install: - -* For new runtime installations, add the `--internal-ingress-host` flag pointing to the ingress host for `app-proxy`. -* For existing installations, commit changes to the installation repository by modifying the `app-proxy ingress` and `.yaml` - See [(Optional) Internal ingress host configuration for existing hybrid runtimes](#optional-internal-ingress-host-configuration-for-existing-hybrid-runtimes). - -{::nomarkdown} -
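{:/}

For comparison, an ingress-based silent install might combine the flags above roughly as follows; the ingress class and host values are placeholders for your own ingress configuration:

```shell
# Illustrative ingress-based install; adjust the class and hosts to your cluster.
cf runtime install \
  --runtime my-runtime \
  --ingress-class nginx \
  --ingress-host https://runtime.example.com \
  --internal-ingress-host https://app-proxy.internal.example.com \
  --repo https://github.com/my-org/codefresh-runtime.git \
  --git-token <git-runtime-token> \
  --silent
```

{::nomarkdown}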
                                                        -{:/} - - - -#### Git provider and repo flags -The Git provider defined for the runtime. - ->Because Codefresh creates a [shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration) for the runtimes in your account, the Git provider defined for the first runtime you install in your account is used for all the other runtimes in the same account. - -You can define any of the following Git providers: -* GitHub: - * [GitHub](#github) (the default Git provider) - * [GitHub Enterprise](#github-enterprise) -* GitLab: - * [GitLab Cloud](#gitlab-cloud) - * [GitLab Server](#gitlab-server) -* Bitbucket: - * [Bitbucket Cloud](#bitbucket-cloud) - * [Bitbucket Server](#bitbucket-server) - -{::nomarkdown} -
                                                        -{:/} - - - -##### GitHub -GitHub is the default Git provider for hybrid runtimes. Being the default provider, for both the CLI wizard and Silent install, you need to provide only the repository URL and the Git runtime token. - -> For the required scopes, see [GitHub and GitHub Enterprise runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes). - -`--repo --git-token ` - -where: -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. Copy the clone URL from your GitHub website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}). - If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during runtime installation. - - Repo URL format: - `https://github.com//reponame>.git[/subdirectory][?ref=branch]` - where: - * `/` is your username or organization name, followed by the name of the repo, identical to the HTTPS clone URL. For example, `https://github.com/nr-codefresh/codefresh.io.git`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod` -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)). - -{::nomarkdown} -
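{:/}

Putting the two flags together, a GitHub silent install might look like the sketch below; it reuses the example clone URL from above, and the token value is a placeholder:

```shell
# GitHub is the default Git provider, so no --provider flag is needed.
cf runtime install \
  --repo 'https://github.com/nr-codefresh/codefresh.io.git/runtimes/defs?ref=codefresh-prod' \
  --git-token <git-runtime-token> \
  --silent
```

{::nomarkdown}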
                                                        -{:/} - -##### GitHub Enterprise - -> For the required scopes, see [GitHub and GitHub Enterprise runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes). - - -`--enable-git-providers --provider github --repo --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider github` (required), defines GitHub Enterprise as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitHub Enterprise website (see [Cloning with HTTPS URLs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories#cloning-with-https-urls){:target="\_blank"}). - If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during runtime installation. - Repo URL format: - - `https://ghe-trial.devops.cf-cd.com//.git[/subdirectory][?ref=branch]` - where: - * `/` is your username or organization name, followed by the name of the repo. For example, `codefresh-io/codefresh.io.git`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://ghe-trial.devops.cf-cd.com/codefresh-io/codefresh.io.git/runtimes/defs?ref=codefresh-prod` -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitHub runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#github-and-github-enterprise-runtime-token-scopes)). - - -{::nomarkdown} -
                                                        -{:/} - -##### GitLab Cloud -> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes). - - -`--enable-git-providers --provider gitlab --repo --git-token ` - -where: -* `--enable-git-providers`(required), indicates that you are not using the default Git provider for the runtime. -* `--provider gitlab` (required), defines GitLab Cloud as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git project for the runtime installation, including the `.git` suffix. Copy the clone URL for HTTPS from your GitLab website. - If the repo doesn't exist, copy an existing clone URL and change the name of the repo. Codefresh creates the repository during runtime installation. - - > Important: You must create the group with access to the project prior to the installation. - - Repo URL format: - - `https://gitlab.com//.git[/subdirectory][?ref=branch]` - where: - * `` is either your username, or if your project is within a group, the front-slash separated path to the project. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy) - * `` is the name of the project. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Examples: - `https://gitlab.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner) - - `https://gitlab.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy) - -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)). - - -{::nomarkdown} -
                                                        -{:/} - - - -##### GitLab Server - -> For the required scopes, see [GitLab Cloud and GitLab Server runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes). - -`--enable-git-providers --provider gitlab --repo --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider gitlab` (required), defines GitLab Server as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. - If the project doesn't exist, copy an existing clone URL and change the name of the project. Codefresh creates the project during runtime installation. - - > Important: You must create the group with access to the project prior to the installation. - - Repo URL format: - `https://gitlab-onprem.devops.cf-cd.com//.git[/subdirectory][?ref=branch]` - where: - * `` is your username, or if the project is within a group or groups, the name of the group. For example, `nr-codefresh` (owner), or `parent-group/child-group` (group hierarchy) - * `` is the name of the project. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Examples: - `https://gitlab-onprem.devops.cf-cd.com/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` (owner) - - `https://gitlab-onprem.devops.cf-cd.com/parent-group/child-group/codefresh.git/runtimes/defs?ref=codefresh-prod` (group hierarchy) - -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [GitLab runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#gitlab-cloud-and-gitlab-server-runtime-token-scopes)). - - -{::nomarkdown} -
-{:/} - -##### Bitbucket Cloud -> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes). - - -`--enable-git-providers --provider bitbucket --repo --git-user --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider bitbucket` (required), defines Bitbucket Cloud as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. - If the repository doesn't exist, copy an existing clone URL and change the name of the repository. Codefresh creates the repository during runtime installation. - >Important: Remove the username, including @, from the copied URL. - - Repo URL format: - - `https://bitbucket.org.git[/subdirectory][?ref=branch]` - where: - * `` is your workspace ID. For example, `nr-codefresh`. - * `` is the name of the repository. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://bitbucket.org/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod` -* `--git-user ` (required), is your username for the Bitbucket Cloud account. -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)). - - -{::nomarkdown} -
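{:/}

For example, a Bitbucket Cloud silent install that combines these flags might look like the following sketch; the username and token are placeholders, and the repository URL reuses the example above:

```shell
# Illustrative Bitbucket Cloud install.
cf runtime install \
  --enable-git-providers \
  --provider bitbucket \
  --repo 'https://bitbucket.org/nr-codefresh/codefresh.git/runtimes/defs?ref=codefresh-prod' \
  --git-user my-bitbucket-username \
  --git-token <git-runtime-token> \
  --silent
```

{::nomarkdown}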
-{:/} - -##### Bitbucket Server - -> For the required scopes, see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes). - - -`--enable-git-providers --provider bitbucket-server --repo --git-user --git-token ` - -where: -* `--enable-git-providers` (required), indicates that you are not using the default Git provider for the runtime. -* `--provider bitbucket-server` (required), defines Bitbucket Server as the Git provider for the runtime and the account. -* `--repo ` (required), is the `HTTPS` clone URL of the Git repository for the runtime installation, including the `.git` suffix. - If the repository doesn't exist, copy an existing clone URL and change the name of the repository. Codefresh then creates the repository during runtime installation. - >Important: Remove the username, including @, from the copied URL. - - Repo URL format: - - `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm//.git[/subdirectory][?ref=branch]` - where: - * `` is your username or organization name. For example, `codefresh-io`. - * `` is the name of the repo. For example, `codefresh`. - * `[/subdirectory]` (optional) is the path to a subdirectory within the repo. When omitted, the runtime is installed in the root of the repository. For example, `/runtimes/defs`. - * `[?ref=branch]` (optional) is the `ref` queryParam to select a specific branch. When omitted, the runtime is installed in the default branch. For example, `codefresh-prod`. - - Example: - `https://bitbucket-server-8.2.devops.cf-cd.com:7990/scm/codefresh-io/codefresh.git/runtimes/defs?ref=codefresh-prod` -* `--git-user ` (required), is your username for the Bitbucket Server account. -* `--git-token ` (required), is the Git token authenticating access to the runtime installation repository (see [Bitbucket runtime token scopes]({{site.baseurl}}/docs/reference/git-tokens/#bitbucket-cloud-and-bitbucket-server-runtime-token-scopes)). - -{::nomarkdown} -

                                                        -{:/} - -#### Codefresh resource flags -**Codefresh demo resources** -Optional. -Install demo pipelines to use as a starting point to create your own pipelines. We recommend installing the demo resources as these are used in our quick start tutorials. - -* Silent install: Add the `--demo-resources` flag, and define its value as `true` (default), or `false`. For example, `--demo-resources=true` - -**Insecure flag** -For _on-premises installations_, if the Ingress controller does not have a valid SSL certificate, to continue with the installation, add the `--insecure` flag to the installation command. - -{::nomarkdown} -

                                                        -{:/} - - -### Install the Codefresh CLI - -Install the Codefresh CLI using the option that best suits you: `curl`, `brew`, or standard download. -If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation. - -{::nomarkdown} -

-{:/} - -### Install the hybrid runtime - -**Before you begin** -* Make sure you meet the [minimum requirements]({{site.baseurl}}/docs/runtime/requirements/#minimum-requirements) for runtime installation -* Make sure you have a [runtime token with the required scopes from your Git provider]({{site.baseurl}}/docs/reference/git-tokens) -* [Download or upgrade to the latest version of the CLI]({{site.baseurl}}/docs/clients/csdp-cli/#upgrade-codefresh-cli) -* Review [Hybrid runtime installation flags](#hybrid-runtime-installation-flags) -* Make sure your ingress controller is configured correctly: - * [Ambassador ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#ambassador-ingress-configuration) - * [AWS ALB ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#alb-aws-ingress-configuration) - * [Istio ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#istio-ingress-configuration) - * [NGINX Enterprise ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-enterprise-ingress-configuration) - * [NGINX Community ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#nginx-community-version-ingress-configuration) - * [Traefik ingress configuration]({{site.baseurl}}/docs/runtime/requirements/#traefik-ingress-configuration) - - -{::nomarkdown} -
                                                        -{:/} - -**How to** - -1. Do one of the following: - * If this is your first hybrid runtime installation, in the Welcome page, select **+ Install Runtime**. - * If you have provisioned a hybrid runtime, to provision additional runtimes, in the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. Click **+ Add Runtimes**, and then select **Hybrid Runtimes**. -1. Do one of the following: - * CLI wizard: Run `cf runtime install`, and follow the prompts to enter the required values. - * Silent install: Pass the required flags in the install command: - `cf runtime install --repo --git-token --silent` - For the list of flags, see [Hybrid runtime installation flags](#hybrid-runtime-installation-flags). -1. If relevant, complete the configuration for these ingress controllers: - * [ALB AWS: Alias DNS record in route53 to load balancer]({{site.baseurl}}/docs/runtime/requirements/#alias-dns-record-in-route53-to-load-balancer) - * [Istio: Configure cluster routing service]({{site.baseurl}}/docs/runtime/requirements/#cluster-routing-service) - * [NGINX Enterprise ingress controller: Patch certificate secret]({{site.baseurl}}/docs/runtime/requirements/#patch-certificate-secret) -1. If you bypassed installing ingress resources with the `--skip-ingress` flag for ingress controllers not in the supported list, create and register Git integrations using these commands: - `cf integration git add default --runtime --api-url ` - `cf integration git register default --runtime --token ` - - -{::nomarkdown} -
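{:/}

For example, for a runtime installed with `--skip-ingress` against GitHub, the two commands might look like the sketch below; the runtime name and token are placeholders, and `https://api.github.com` is GitHub's standard API endpoint:

```shell
# Create and register the default Git integration manually.
cf integration git add default --runtime my-runtime --api-url https://api.github.com
cf integration git register default --runtime my-runtime --token <git-personal-access-token>
```

{::nomarkdown}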
                                                        -{:/} - -### Hybrid runtime components - -**Git repositories** -* Runtime install repository: The installation repo contains three folders: apps, bootstrap and projects, to manage the runtime itself with Argo CD. -* Git source repository: Created with the name `[repo_name]_git-source`. This repo stores manifests for pipelines with sources, events, workflow templates. See [Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/). - -* Shared configuration repository: Stores configuration and resource manifests that can be shared across runtimes, such as integration resources. See [Shared configuration repository]({{site.baseurl}}/docs/reference/shared-configuration/) - -**Argo CD components** -* Project, comprising an Argo CD AppProject and an ApplicationSet -* Installations of the following applications in the project: - * Argo CD - * Argo Workflows - * Argo Events - * Argo Rollouts - -**Codefresh-specific components** -* Codefresh Applications in the Argo CD AppProject: - * App-proxy facilitating behind-firewall access to Git - * Git Source entity that references the`[repo_name]_git-source` - -Once the hybrid runtime is successfully installed, it is provisioned on the Kubernetes cluster, and displayed in the **Runtimes** page. - -{::nomarkdown} -
                                                        -{:/} - - -### (Optional) Internal ingress host configuration for existing hybrid runtimes -If you already have provisioned hybrid runtimes, to use an internal ingress host for app-proxy communication and an external ingress host to handle webhooks, change the specs for the `Ingress` and `Runtime` resources in the runtime installation repository. Use the examples as guidelines. - -`/apps/app-proxy/overlays//ingress.yaml`: change `host` - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: codefresh-cap-app-proxy - namespace: codefresh #replace with your runtime name -spec: - ingressClassName: nginx - rules: - - host: my-internal-ingress-host # replace with the internal ingress host for app-proxy - http: - paths: - - backend: - service: - name: cap-app-proxy - port: - number: 3017 - path: /app-proxy/ - pathType: Prefix -``` - -`..//bootstrap/.yaml`: add `internalIngressHost` - -```yaml -apiVersion: v1 -data: - base-url: https://g.codefresh.io - runtime: | - apiVersion: codefresh.io/v1alpha1 - kind: Runtime - metadata: - creationTimestamp: null - name: codefresh #replace with your runtime name - namespace: codefresh #replace with your runtime name - spec: - bootstrapSpecifier: github.com/codefresh-io/cli-v2/manifests/argo-cd - cluster: https://7DD8390300DCEFDAF87DC5C587EC388C.gr7.us-east-1.eks.amazonaws.com - components: - - isInternal: false - name: events - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-events - wait: true - - isInternal: false - name: rollouts - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-rollouts - wait: false - - isInternal: false - name: workflows - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-workflows - wait: false - - isInternal: false - name: app-proxy - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/app-proxy - wait: false - defVersion: 1.0.1 - ingressClassName: nginx - ingressController: k8s.io/ingress-nginx - ingressHost: https://support.cf.com/ - internalIngressHost: https://my-internal-ingress-host # add this line and replace my-internal-ingress-host with your internal ingress host - repo: https://github.com/NimRegev/my-codefresh.git - version: 99.99.99 -``` - - -### Related articles -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) -[Troubleshoot hybrid runtime installation]({{site.baseurl}}/docs/troubleshooting/runtime-issues/) diff --git a/_docs/runtime/installation_original.md b/_docs/runtime/installation_original.md deleted file mode 100644 index a9624bc7..00000000 --- a/_docs/runtime/installation_original.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: "Install hybrid runtimes" -description: "" -group: runtime -toc: true ---- - -If you have a hybrid environment, you can provision one or more hybrid runtimes in your Codefresh account. The hybrid runtime comprises Argo CD components and Codefresh-specific components. The Argo CD components are derived from a fork of the Argo ecosystem, and do not correspond to the open-source versions available. 
- -> If you have Hosted GitOps, to provision a hosted runtime, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -There are two parts to installing a hybrid runtime: - -1. Installing the Codefresh CLI -2. Installing the hybrid runtime from the CLI, either through the CLI wizard or via silent installation. - The hybrid runtime is installed in a specific namespace on your cluster. You can install more runtimes on different clusters in your deployment. - Every hybrid runtime installation makes commits to two Git repos: - - * Runtime install repo: The installation repo that manages the hybrid runtime itself with Argo CD. If the repo URL does not exist, runtime creates it automatically. - * Git Source repo: Created automatically during runtime installation. The repo where you store manifests to run CodefreshCodefresh pipelines. - -See also [Codefresh architecture]({{site.baseurl}}/docs/getting-started/architecture). - -### Installing the Codefresh CLI - -Install the Codefresh CLI using the option that best suits you: `curl`, `brew`, or standard download. -If you are not sure which OS to select for `curl`, simply select one, and Codefresh automatically identifies and selects the right OS for CLI installation. - -### Installing the hybrid runtime - -1. Do one of the following: - * If this is your first hybrid runtime installation, in the Welcome page, select **+ Install Runtime**. - * If you have provisioned a hybrid runtime, to provision additional runtimes, in the Codefresh UI, go to [**Runtimes**](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}, and select **+ Add Runtimes**. -1. Run: - * CLI wizard: Run `cf runtime install`, and follow the prompts to enter the required values. - * Silent install: Pass the required flags in the install command: - `cf runtime install --repo --git-token --silent` - For the list of flags, see _Hybrid runtime flags_. - -> Note: -> Hybrid runtime installation starts by checking network connectivity and the K8s cluster server version. - To skip these tests, pass the `--skip-cluster-checks` flag. - -#### Hybrid runtime flags - -**Runtime name** -Required. -The runtime name must start with a lower-case character, and can include up to 62 lower-case characters and numbers. -* CLI wizard: Add when prompted. -* Silent install: Required. - -**Namespace resource labels** -Optional. -The label of the namespace resource to which you are installing the hybrid runtime. You can add more than one label. Labels are required to identity the networks that need access during installation, as is the case when using services meshes such as Istio for example. - -* CLI wizard and Silent install: Add the `--namespace-labels` flag, and define the labels in `key=value` format. Separate multiple labels with `commas`. - -**Kube context** -Required. -The cluster defined as the default for `kubectl`. If you have more than one Kube context, the current context is selected by default. - -* CLI wizard: Select the Kube context from the list displayed. -* Silent install: Explicitly specify the Kube context with the `--context` flag. - -**Ingress class** -Required. -If you have more than one ingress class configured on your cluster: - -* CLI wizard: Select the ingress class for runtime installation from the list displayed. -* Silent install: Explicitly specify the ingress class through the `--ingress-class` flag. 
Otherwise, runtime installation fails. - -**Ingress host** -Required. -The IP address or host name of the ingress controller component. - -* CLI wizard: Automatically selects and displays the host, either from the cluster or the ingress controller associated with the **Ingress class**. -* Silent install: Add the `--ingress-host` flag. If a value is not provided, takes the host from the ingress controller associated with the **Ingress class**. - > Important: For AWS ALB, the ingress host is created post-installation. However, when prompted, add the domain name you will create in `Route 53` as the ingress host. - -SSL certificates for the ingress host: -If the ingress host does not have a valid SSL certificate, you can continue with the installation in insecure mode, which disables certificate validation. - -* CLI wizard: Automatically detects and prompts you to confirm continuing with the installation in insecure mode. -* Silent install: To continue with the installation in insecure mode, add the `--insecure-ingress-host` flag. - -**Internal ingress host** -Optional. -Enforce separation between internal (app-proxy) and external (webhook) communication by adding an internal ingress host for the app-proxy service in the internal network. -For both CLI wizard and Silent install: - -* For new runtime installations, add the `--internal-ingress-host` flag pointing to the ingress host for `app-proxy`. -* For existing installations, commit changes to the installation repository by modifying the `app-proxy ingress` and `.yaml` - See _Internal ingress host configuration (optional for existing runtimes only)_ in [Post-installation configuration](#post-installation-configuration). - -**Ingress resources** -Optional. -If you have a different routing service (not NGINX), bypass installing ingress resources with the `--skip-ingress` flag. -In this case, after completing the installation, manually configure the cluster's routing service, and create and register Git integrations. See _Cluster routing service_ in [Post-installation configuration](#post-installation-configuration). - -**Shared configuration repository** -The Git repository per runtime account with shared configuration manifests. -* CLI wizard and Silent install: Add the `--shared-config-repo` flag and define the path to the shared repo. - -**Insecure flag** -For _on-premises installations_, if the Ingress controller does not have a valid SSL certificate, to continue with the installation, add the `--insecure` flag to the installation command. - -**Repository URLs** -The GitHub repository to house the installation definitions. - -* CLI wizard: If the repo doesn't exist, Codefresh creates it during runtime installation. -* Silent install: Required. Add the `--repo` flag. - -**Git runtime token** -Required. -The Git token authenticating access to the GitHub installation repository. -* Silent install: Add the `--git-token` flag. - -**Codefresh demo resources** -Optional. -Install demo pipelines to use as a starting point to create your own pipelines. We recommend installing the demo resources as these are used in our quick start tutorials. - -* Silent install: Add the `--demo-resources` flag. By default, set to `true`. - -### Hybrid runtime components - -**Git repositories** - -* Runtime install repo: The installation repo contains three folders: apps, bootstrap and projects, to manage the runtime itself with Argo CD. -* Git source repository: Created with the name `[repo_name]_git-source`. 
This repo stores manifests for pipelines with sources, events, workflow templates. - -**Argo CD components** - -* Project, comprising an Argo CD AppProject and an ApplicationSet -* Installations of the following applications in the project: - * Argo CD - * Argo Workflows - * Argo Events - * Argo Rollouts - -**Codefresh-specific components** - -* Codefresh Applications in the Argo CD AppProject: - * App-proxy facilitating behind-firewall access to Git - * Git Source entity that references the`[repo_name]_git-source` - -Once the hybrid runtime is successfully installed, it is provisioned on the Kubernetes cluster, and displayed in the **Runtimes** page. - -### Hybrid runtime post-installation configuration - -After provisioning a hybrid runtime, configure additional settings for the following: - -* NGINX Enterprise installations (with and without NGINX Ingress Operator) -* AWS ALB installations -* Cluster routing service if you bypassed installing ingress resources -* (Existing hybrid runtimes) Internal and external ingress host specifications -* Register Git integrations - - - -#### AWS ALB post-install configuration - -For AWS ALB installations, do the following: - -* Create an `Alias` record in Amazon Route 53 -* Manually register Git integrations - see _Git integration registration_. - -Create an `Alias` record in Amazon Route 53, and map your zone apex (example.com) DNS name to your Amazon CloudFront distribution. -For more information, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html){:target="\_blank"}. - -{% include image.html - lightbox="true" - file="/images/runtime/post-install-alb-ingress.png" - url="/images/runtime/post-install-alb-ingress.png" - alt="Route 53 record settings for AWS ALB" - caption="Route 53 record settings for AWS ALB" - max-width="30%" -%} - -#### Configure cluster routing service - -If you bypassed installing ingress resources with the `--skip-ingress` flag, configure the `host` for the Ingress, or the VirtualService for Istio if used, to route traffic to the `app-proxy` and `webhook` services, as in the examples below. 
- -**Ingress resource example for `app-proxy`:** - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: codefresh-cap-app-proxy - namespace: codefresh -spec: - ingressClassName: alb - rules: - - host: my.support.cf-cd.com # replace with your host name - http: - paths: - - backend: - service: - name: cap-app-proxy - port: - number: 3017 - path: /app-proxy/ - pathType: Prefix -``` - -**`VirtualService` examples for `app-proxy` and `webhook`:** - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: cap-app-proxy -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway - http: - - match: - - uri: - prefix: /app-proxy - route: - - destination: - host: cap-app-proxy - port: - number: 3017 -``` - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: csdp-default-git-source -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway - http: - - match: - - uri: - prefix: /webhooks/test-runtime3/push-github # replace `test-runtime3` with your runtime name - route: - - destination: - host: push-github-eventsource-svc - port: - number: 80 -``` -Continue with [Git integration registration](#git-integration-registration) in this article. - -#### Internal ingress host configuration (optional for existing hybrid runtimes only) - -If you already have provisioned hybrid runtimes, to use an internal ingress host for app-proxy communication and an external ingress host to handle webhooks, change the specs for the `Ingress` and `Runtime` resources in the runtime installation repository. Use the examples as guidelines. 
- -`/apps/app-proxy/overlays//ingress.yaml`: change `host` - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: codefresh-cap-app-proxy - namespace: codefresh #replace with your runtime name -spec: - ingressClassName: nginx - rules: - - host: my-internal-ingress-host # replace with the internal ingress host for app-proxy - http: - paths: - - backend: - service: - name: cap-app-proxy - port: - number: 3017 - path: /app-proxy/ - pathType: Prefix -``` - -`..//bootstrap/.yaml`: add `internalIngressHost` - -```yaml -apiVersion: v1 -data: - base-url: https://g.codefresh.io - runtime: | - apiVersion: codefresh.io/v1alpha1 - kind: Runtime - metadata: - creationTimestamp: null - name: codefresh #replace with your runtime name - namespace: codefresh #replace with your runtime name - spec: - bootstrapSpecifier: github.com/codefresh-io/cli-v2/manifests/argo-cd - cluster: https://7DD8390300DCEFDAF87DC5C587EC388C.gr7.us-east-1.eks.amazonaws.com - components: - - isInternal: false - name: events - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-events - wait: true - - isInternal: false - name: rollouts - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-rollouts - wait: false - - isInternal: false - name: workflows - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/argo-workflows - wait: false - - isInternal: false - name: app-proxy - type: kustomize - url: github.com/codefresh-io/cli-v2/manifests/app-proxy - wait: false - defVersion: 1.0.1 - ingressClassName: nginx - ingressController: k8s.io/ingress-nginx - ingressHost: https://support.cf.com/ - internalIngressHost: https://my-internal-ingress-host # add this line and replace my-internal-ingress-host with your internal ingress host - repo: https://github.com/NimRegev/my-codefresh.git - version: 99.99.99 -``` - -#### Git integration registration - -If you bypassed installing ingress resources with the `--skip-ingress` flag, or if AWS ALB is your ingress controller, create and register Git integrations using these commands: - `cf integration git add default --runtime --api-url ` - - `cf integration git register default --runtime --token ` - -### Related articles -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/) -[Troubleshoot runtime installation]({{site.baseurl}}/docs/troubleshooting/runtime-issues/) diff --git a/_docs/runtime/monitor-manage-runtimes.md b/_docs/runtime/monitor-manage-runtimes.md deleted file mode 100644 index 189b2b08..00000000 --- a/_docs/runtime/monitor-manage-runtimes.md +++ /dev/null @@ -1,332 +0,0 @@ ---- -title: "Manage provisioned runtimes" -description: "" -group: runtime -redirect_from: - - /monitor-manage-runtimes/ - - /monitor-manage-runtimes -toc: true ---- - - -The **Runtimes** page displays the provisioned runtimes in your account, both hybrid, and the hosted runtime if you have one. - -View runtime components and information in List or Topology view formats, and upgrade, uninstall, and migrate runtimes. 

{% include
  image.html
  lightbox="true"
  file="/images/runtime/runtime-list-view.png"
  url="/images/runtime/runtime-list-view.png"
  alt="Runtime List View"
  caption="Runtime List View"
  max-width="70%"
%}

Select the view mode that suits you, and manage provisioned runtimes directly from either view.

Manage provisioned runtimes:
* [Add managed clusters to hybrid or hosted runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/)
* [Add and manage Git Sources associated with hybrid or hosted runtimes]({{site.baseurl}}/docs/runtime/git-sources/)
* [Upgrade provisioned hybrid runtimes](#hybrid-upgrade-provisioned-runtimes)
* [Uninstall provisioned runtimes](#uninstall-provisioned-runtimes)
* [Update Git runtime tokens](#update-git-runtime-tokens)
* [Migrate ingress-less hybrid runtimes](#hybrid-migrate-ingress-less-runtimes)

> Unless specified otherwise, management options are common to both hybrid and hosted runtimes. If an option is valid only for hybrid runtimes, it is indicated as such.

To monitor provisioned hybrid runtimes, including recovering runtimes for failed clusters, see [Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/).

### Runtime views

View provisioned hybrid and hosted runtimes in List or Topology view formats.

* List view: The default view, displays the list of provisioned runtimes, the clusters managed by them, and Git Sources.
* Topology view: Displays a hierarchical view of runtimes and the clusters managed by them, with the health and sync status of each cluster.

#### List view

The List view is a grid view of the provisioned runtimes.

Here is an example of the List view for runtimes.
{% include
  image.html
  lightbox="true"
  file="/images/runtime/runtime-list-view.png"
  url="/images/runtime/runtime-list-view.png"
  alt="Runtime List View"
  caption="Runtime List View"
  max-width="70%"
%}

Here is a description of the information in the List View.

{: .table .table-bordered .table-hover}
| List View Item| Description |
| -------------- | ---------------- |
|**Name**| The name of the provisioned Codefresh runtime. |
|**Type**| The type of runtime provisioned, either **Hybrid** or **Hosted**. |
|**Cluster/Namespace**| The K8s API server endpoint, and the namespace within the cluster. |
|**Modules**| The modules installed based on the type of provisioned runtime. Hybrid runtimes include CI and CD Ops modules. Hosted runtimes include CD Ops. |
|**Managed Cluster**| The number of managed clusters, if any, for the runtime. To view the list of managed clusters, select the runtime, and then the **Managed Clusters** tab. To work with managed clusters, see [Adding external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster).|
|**Version**| The version of the runtime currently installed. **Update Available!** indicates there are later versions of the runtime. To see all the commits to the runtime, mouse over **Update Available!**, and select **View Complete Change Log**. |
|**Last Updated**| The most recent update information from the runtime to the Codefresh platform. Updates are sent to the platform typically every few minutes. Longer update intervals may indicate networking issues.|
|**Sync Status**| The health and sync status of the runtime or cluster. An error indicator means there are health or sync errors in the runtime, or in a managed cluster if one was added to the runtime, and the runtime name is colored red. A sync indicator means that the runtime is being synced to the cluster on which it is provisioned. |

#### Topology view

A hierarchical visualization of the provisioned runtimes. The Topology view makes it easy to identify key information such as versions, and health and sync status, for both the provisioned runtime and the clusters managed by it.
Here is an example of the Topology view for runtimes.
 {% include
  image.html
  lightbox="true"
  file="/images/runtime/runtime-topology-view.png"
  url="/images/runtime/runtime-topology-view.png"
  alt="Runtime Topology View"
  caption="Runtime Topology View"
  max-width="30%"
%}

Here is a description of the information in the Topology view.

{: .table .table-bordered .table-hover}
| Topology View Item | Description |
| ------------------------| ---------------- |
|**Runtime** | ![](../../../images/icons/codefresh-runtime.png?display=inline-block) The provisioned runtime. Hybrid runtimes display the name of the K8s API server endpoint with the cluster. Hosted runtimes display 'hosted'. |
|**Cluster** | The local cluster, and the managed clusters if any, for the runtime. The local cluster is always displayed as `in-cluster`, with the server URL set to `https://kubernetes.default.svc/`. Select the add option to add a new managed cluster. To view cluster components, select the cluster. To add and work with managed clusters, see [Adding external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster). |
|**Health/Sync status** | The health and sync status of the runtime or cluster. An error indicator means there are health or sync errors in the runtime, or in a managed cluster if one was added to the runtime; the runtime or cluster node is bordered in red and the name is colored red. A sync indicator means that the runtime is being synced to the cluster on which it is provisioned. |
|**Search and View options** | {::nomarkdown}Find a runtime or its clusters by typing part of the runtime/cluster name, and then navigate to the entries found. Topology view options: Resize to window, zoom in, zoom out, full screen view.
                                                        {:/}| - - - -### (Hybrid) Upgrade provisioned runtimes - -Upgrade provisioned hybrid runtimes to install critical security updates or to install the latest version of all components. Upgrade a provisioned hybrid runtime by running a silent upgrade or through the CLI wizard. -If you have managed clusters for the hybrid runtime, upgrading the runtime automatically updates runtime components within the managed cluster as well. - -> When there are security updates, the UI displays the alert, _At least one runtime requires a security update_. The Version column displays an _Update Required!_ notification. - -> If you have older runtime versions, upgrade to manually define or create the shared configuration repo for your account. See [Shared configuration repo]({{site.baseurl}}/docs/reference/shared-configuration/). - - -**Before you begin** -For both silent or CLI-wizard based upgrades, make sure you have: - -* The latest version of the Codefresh CLI - Run `cf version` to see your version and [click here](https://github.com/codefresh-io/cli-v2/releases){:target="\_blank"} to compare with the latest CLI version. -* A valid runtime Git token - -**Silent upgrade** - -* Pass the mandatory flags in the upgrade command: - - `cf runtime upgrade --git-token --silent` - where: - `` is a valid runtime token with the `repo` and `admin-repo.hook` scopes. - -**CLI wizard-based upgrade** - -1. In the Codefresh UI, make sure you are in [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. Switch to either the **List View** or to the **Topology View**. -1. **List view**: - * Select the runtime name. - * To see all the commits to the runtime, in the Version column, mouse over **Update Available!**, and select **View Complete Change Log**. - * On the top-right, select **Upgrade**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-list-view-upgrade.png" - url="/images/runtime/runtime-list-view-upgrade.png" - alt="List View: Upgrade runtime option" - caption="List View: Upgrade runtime option" - max-width="30%" - %} - - **Topology view**: - Select the runtime cluster, and from the panel, select the three dots and then select **Upgrade Runtime**. - {% include - image.html - lightbox="true" - file="/images/runtime/runtiime-topology-upgrade.png" - url="/images/runtime/runtiime-topology-upgrade.png" - alt="Topology View: Upgrade runtime option" - caption="Topology View: Upgrade runtime option" - max-width="30%" -%} - -{:start="4"} - -1. If you have already installed the Codefresh CLI, in the Install Upgrades panel, copy the upgrade command. - - {% include - image.html - lightbox="true" - file="/images/runtime/install-upgrades.png" - url="/images/runtime/install-upgrades.png" - alt="Upgrade runtime" - caption="Upgrade runtime panel" - max-width="30%" -%} - -{:start="5"} -1. In your terminal, paste the command, and do the following: - * Update the Git token value. - * To manually define the shared configuration repo, add the `--shared-config-repo` flag with the path to the repo. -1. Confirm to start the upgrade. - - - - -### Uninstall provisioned runtimes - -Uninstall provisioned hybrid and hosted runtimes that are not in use. Uninstall a runtime by running a silent uninstall, or through the CLI wizard. -> Uninstalling a runtime removes the Git Sources and managed clusters associated with the runtime. 

**Before you begin**
For both types of uninstalls, make sure you have:

* The latest version of the Codefresh CLI
* A valid runtime Git token
* The Kube context from which to uninstall the provisioned runtime

**Silent uninstall**
Pass the mandatory flags in the uninstall command:
  `cf runtime uninstall --git-token <RUNTIME_GIT_TOKEN> --silent`
  where:
  `--git-token` is a valid runtime token with the `repo` and `admin-repo.hook` scopes.

**CLI wizard uninstall**

1. In the Codefresh UI, make sure you are in [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}.
1. Switch to either the **List View** or to the **Topology View**.
1. **List view**: On the top-right, select the three dots and then select **Uninstall**.

  {% include
  image.html
  lightbox="true"
  file="/images/runtime/uninstall-location.png"
  url="/images/runtime/uninstall-location.png"
  alt="List View: Uninstall runtime option"
  caption="List View: Uninstall runtime option"
  max-width="30%"
%}

**Topology view**: Select the runtime node, and from the panel, select the three dots and then select **Uninstall Runtime**.
  {% include
  image.html
  lightbox="true"
  file="/images/runtime/runtime-topology-uninstall.png"
  url="/images/runtime/runtime-topology-uninstall.png"
  alt="Topology View: Uninstall runtime option"
  caption="Topology View: Uninstall runtime option"
  max-width="30%"
%}

{:start="4"}

1. If you already have the latest version of the Codefresh CLI, in the Uninstall Codefresh Runtime panel, copy the uninstall command.

  {% include
  image.html
  lightbox="true"
  file="/images/runtime/uninstall.png"
  url="/images/runtime/uninstall.png"
  alt="Uninstall Codefresh runtime"
  caption="Uninstall Codefresh runtime"
  max-width="40%"
%}

{:start="5"}

1. In your terminal, paste the command, and update the Git token value.
1. Select the Kube context from which to uninstall the runtime, and then confirm the uninstall.
1. If you get errors, run the uninstall command again, with the `--force` flag.



### Update Git runtime tokens

Provisioned runtimes require valid Git tokens to authenticate the runtimes.

There are two situations in which you need to update Git runtime tokens:
* Update invalid, revoked, or expired tokens: Codefresh automatically flags runtimes with such tokens. It is mandatory to update the Git tokens for these runtimes to continue working with the platform.
* Update valid tokens: Optional. You may want to update Git runtime tokens, even valid runtime tokens, by deleting the existing token and replacing it with a new runtime token.

The method for updating a Git runtime token is the same regardless of the reason for the update:
* OAuth2 authorization, if your admin has registered an OAuth Application for Codefresh
* Git access token authentication, by generating a personal access token in your Git provider account with the correct permissions

**Before you begin**
* To authenticate through a Git access token, generate an access token for the runtime with the correct scopes: `repo` and `admin-repo`

**How to**
1. Do one of the following:
  * If you see a notification in the Codefresh UI about invalid runtime tokens, click **[Update Token]**.
    In the Runtimes page, runtimes with invalid tokens are prefixed by a key icon. Mousing over the icon shows that the token is invalid.
  * To update an existing runtime token, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}.
1. Select the runtime for which to update the Git token.
1. From the context menu with the additional actions at the top-right, select **Update Git Runtime token**.

  {% include
  image.html
  lightbox="true"
  file="/images/runtime/update-git-runtime-token.png"
  url="/images/runtime/update-git-runtime-token.png"
  alt="Update Git runtime token option"
  caption="Update Git runtime token option"
  max-width="40%"
%}

{:start="4"}
1. Do one of the following:
  * If your admin has set up OAuth access, click **Authorize Access to Git Provider**. Go to _step 5_.
  * Alternatively, authenticate with an access token from your Git provider. Go to _step 6_.

{:start="5"}
1. For OAuth2 authorization:
  > If the application is not registered, you get an error. Contact your admin for help.
  * Enter your credentials, and select **Sign In**.
  * If required (for example, if two-factor authentication is configured), complete the verification.

  {% include
  image.html
  lightbox="true"
  file="/images/administration/user-settings/oauth-user-authentication.png"
  url="/images/administration/user-settings/oauth-user-authentication.png"
  alt="Authorizing access with OAuth2"
  caption="Authorizing access with OAuth2"
  max-width="30%"
  %}

{:start="6"}
1. For Git token authentication, expand **Advanced authorization options**, and then paste the generated token in the **Git runtime token** field.

1. Click **Update Token**.


### Related articles
[Monitor provisioned hybrid runtimes]({{site.baseurl}}/docs/runtime/monitoring-troubleshooting/)
[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/)
[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/)

diff --git a/_docs/runtime/monitoring-troubleshooting.md b/_docs/runtime/monitoring-troubleshooting.md
deleted file mode 100644
index c225c1b4..00000000
--- a/_docs/runtime/monitoring-troubleshooting.md
+++ /dev/null
@@ -1,282 +0,0 @@
---
title: "(Hybrid) Monitor provisioned runtimes"
description: ""
group: runtime
toc: true
---

Monitor provisioned runtimes for security, health, and sync errors:

* (Hybrid and hosted) View/download logs for runtimes and for runtime components
* (Hybrid) Restore provisioned runtimes
* (Hybrid) Configure browsers to allow access to insecure runtimes
* (Hybrid) Monitor notifications in the Activity Log


### View/download logs to troubleshoot runtimes
Logs are available for completed runtimes, both for the runtime and for individual runtime components. Download runtime log files for offline viewing and analysis, or view online logs for a runtime component, and download them if needed for offline analysis. Online logs support free-text search, search-result navigation, and line-wrap for enhanced readability.

Log files include events from the date of the application launch, with the newest events listed first.

{::nomarkdown}

                                                        -{:/} - -#### Download logs for runtimes -Download the log file for a runtime. The runtime log is downloaded as a `.tar.gz` file, which contains the individual log files for each runtime component. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime for which to download logs. -1. From the list of **Additional Actions**, select **Download All Logs**. - The log file is downloaded to the Downloads folder or the folder designated for downloads, with the filename, `.tar.gz`. For example, `codefreshv2-production2.tar.gz`. - - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-download-all.png" - url="/images/runtime/runtime-logs-download-all.png" - alt="Download logs for selected runtime" - caption="Download logs for selected runtime" - max-width="40%" -%} - - -{:start="4"} -1. To view the log files of the individual components, unzip the file. - Here is an example of the folder with the individual logs. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-folder-view.png" - url="/images/runtime/runtime-logs-folder-view.png" - alt="Individual log files in folder" - caption="Individual log files in folder" - max-width="50%" -%} - -{:start="5"} -1. Open a log file with the text editor of your choice. - -{::nomarkdown} -

                                                        -{:/} - -#### View/download logs for runtime components -View online logs for any runtime component, and if needed, download the log file for offline viewing and analysis. - -Online logs show up to 1000 of the most recent events (lines), updated in real time. Downloaded logs include all the events from the application launch to the date and time of download. - -1. In the Codefresh UI, go to [Runtimes](https://g.codefresh.io/2.0/account-settings/runtimes){:target="\_blank"}. -1. If needed, switch to **List View**, and then select the runtime. -1. Select the runtime component and then select **View Logs**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-view-component.png" - url="/images/runtime/runtime-logs-view-component.png" - alt="View log option for individual runtime component" - caption="View log option for individual runtime component" - max-width="40%" -%} - - -{:start="4"} -1. Do the following: - * Search by free-text for any string, and click the next and previous buttons to navigate between the search results. - * To switch on line-wrap for readability, click **Wrap**. - - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-logs-screen-view.png" - url="/images/runtime/runtime-logs-screen-view.png" - alt="Runtime component log example" - caption="Runtime component log example" - max-width="50%" -%} - -{:start="5"} -1. To download the log, click **Download**. - The file is downloaded as `.log`. - -### (Hybrid) Restore provisioned runtimes - -In case of cluster failure, restore the provisioned hybrid runtime from the existing runtime installation repository. For partial or complete cluster failures, you can restore the runtime to either the failed cluster or to a different cluster. Restoring the provisioned runtime reinstalls the runtime leveraging the resources in the existing runtime repo. - -Restoring the runtime: -* Applies `argo-cd` from the installation manifests in your repo to your cluster -* Associates `argo-cd` with the existing installation repo -* Applies the runtime and `argo-cd` secrets to the cluster -* Updates the runtime config map (`.yaml` in the `bootstrap` directory) with the new cluster configuration for these fields: - `cluster` - `ingressClassName` - `ingressController` - `ingressHost` - -{::nomarkdown} -

                                                        -{:/} - -#### How to restore a hybrid runtime -Reinstall the hybrid runtime from the existing installation repository to restore it to the same or a different cluster. - -**Before you begin** - -* Have the following information handy: - > All values must be the identical to the runtime to be restored. - * Runtime name - * Repository URL - * Codefresh context - * Kube context: Required if you are restoring to the same cluster - -**How to** - -1. Run: - `cf runtime install --from-repo` -1. Provide the relevant values when prompted. -1. If you are performing the runtime recovery in a different cluster, verify the ingress resource configuration for `app-proxy`, `workflows`, and `default-git-source`. - If the health status remains as `Progressing`, do the following: - - * In the runtime installation repo, check if the `ingress.yaml` files for the `app-proxy` and `workflows` are configured with the correct `host` and `ingressClassName`: - - `apps/app-proxy/overlays//ingress.yaml` - `apps/workflows/overlays//ingress.yaml` - - * In the Git Source repository, check the `host` and `ingressClassName` in `cdp-default-git-source.ingress.yaml`: - - `resources_/cdp-default-git-source.ingress.yaml` - - See the [example](#ingress-example) below. - -{:start="4"} -1. If you have managed clusters registered to the hybrid runtime you are restoring, reconnect them. - Run the command and follow the instructions in the wizard: - `cf cluster add` - -1. Verify that you have a registered Git integration: - `cf integration git list --runtime ` - -1. If needed, create a new Git integration: - `cf integration git add default --runtime --provider github --api-url https://api.github.com` - -{::nomarkdown} -

                                                        -{:/} - -#### Ingress example -This is an example of the `ingress.yaml` for `workflows`. - - ```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - ingress.kubernetes.io/protocol: https - ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/backend-protocol: https - nginx.ingress.kubernetes.io/rewrite-target: /$2 - creationTimestamp: null - name: runtime-name-workflows-ingress - namespace: runtime-name -spec: - ingressClassName: nginx - rules: - - host: your-ingress-host.com - http: - paths: - - backend: - service: - name: argo-server - port: - number: 2746 - path: /workflows(/|$)(.*) - pathType: ImplementationSpecific -status: - loadBalancer: {} -``` - - -### (Hybrid) Configure browser to allow insecure hybrid runtimes - -If at least one of your hybrid runtimes was installed in insecure mode (without an SSL certificate for the ingress controller from a CA), the UI alerts you that _At least one runtime was installed in insecure mode_. -{% include - image.html - lightbox="true" - file="/images/runtime/runtime-insecure-alert.png" - url="/images/runtime/runtime-insecure-alert.png" - alt="Insecure runtime installation alert" - caption="Insecure runtime installation alert" - max-width="100%" -%} - -All you need to do is to configure the browser to trust the URL and receive content. - -1. Select **View Runtimes** to the right of the alert. - You are taken to the Runtimes page, where you can see insecure runtimes tagged as **Allow Insecure**. - {% include - image.html - lightbox="true" - file="/images/runtime/runtime-insecure-steps.png" - url="/images/runtime/runtime-insecure-steps.png" - alt="Insecure runtimes in Runtime page" - caption="Insecure runtimes in Runtime page" - max-width="40%" -%} -{:start="2"} -1. For _every_ insecure runtime, select **Allow Insecure**, and when the browser prompts you to allow access, do as relevant: - -* Chrome: Click **Advanced** and then **Proceed to site**. -* Firefox: Click **Advanced** and then **Accept the risk and continue**. -* Safari: Click **Show Certificate**, and then select **Always allow content from site**. -* Edge: Click **Advanced**, and then select **Continue to site(unsafe)**. - -### (Hybrid) View notifications for hybrid runtimes in Activity Log - -The Activity Log is a quick way to monitor notifications for runtime events such as upgrades. A pull-down panel in the Codefresh toolbar, the Activity Log shows ongoing, success, and error notifications, sorted by date, starting with today's date. - -1. In the Codefresh UI, on the top-right of the toolbar, select ![](/images/pipeline/monitoring/pipeline-activity-log-toolbar.png?display=inline-block) **Activity Log**. -1. To see notifications for provisioned runtimes, filter by **Runtime**. - - {% include image.html - lightbox="true" - file="/images/runtime/runtime-activity-log.png" - url="/images/runtime/runtime-activity-log.png" - alt="Activity Log filtered by Runtime events" - caption="Activity Log filtered by Runtime events" - max-width="30%" - %} - -{:start="3"} - -1. To see more information on an error, select the **+** sign. - -### (Hybrid) Troubleshoot health and sync errors for runtimes - -The ![](/images/icons/error.png?display=inline-block) icon with the runtime in red indicates either health or sync errors. - -**Health errors** -Health errors are generated by Argo CD and by Codefresh for runtime components. 
- -**Sync errors** -Runtimes with sync errors display an **Out of sync** status in Sync Status column. They are related to discrepancies between the desired and actual state of a runtime component or one of the Git sources associated with the runtime. - -**View errors** -For both views, select the runtime, and then select **Errors Detected**. -Here is an example of health errors for a runtime. - - {% include image.html - lightbox="true" - file="/images/runtime/runtime-health-sync-errors.png" - url="/images/runtime/runtime-health-sync-errors.png" - alt="Health errors for runtime example" - caption="Health errors for runtime example" - max-width="30%" - %} - -### Related articles -[Manage provisioned runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Add Git Sources to runtimes]({{site.baseurl}}/docs/runtime/git-sources/) -[Add external clusters to runtimes]({{site.baseurl}}/docs/runtime/managed-cluster/) \ No newline at end of file diff --git a/_docs/runtime/requirements.md b/_docs/runtime/requirements.md deleted file mode 100644 index f8499f4d..00000000 --- a/_docs/runtime/requirements.md +++ /dev/null @@ -1,742 +0,0 @@ ---- -title: "Hybrid runtime requirements" -description: "" -group: runtime -toc: true ---- - - -The requirements listed are the **_minimum_** requirements to provision **_hybrid runtimes_** in the Codefresh platform. - -> Hosted runtimes are managed by Codefresh. To provision a hosted runtime as part of Hosted GitOps setup, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - ->In the documentation, Kubernetes and K8s are used interchangeably. - -{::nomarkdown} -
{:/}

### Minimum requirements

{: .table .table-bordered .table-hover}
| Item | Requirement |
| -------------- | -------------- |
|Kubernetes cluster | Server version 1.18 and higher, without Argo Project components. Tip: To check the server version, run `kubectl version --short`.|
|Ingress controller| Configured on Kubernetes cluster and exposed from the cluster. Supported and tested ingress controllers include: Ambassador (see [Ambassador ingress configuration](#ambassador-ingress-configuration)); AWS ALB (Application Load Balancer) (see [AWS ALB ingress configuration](#aws-alb-ingress-configuration)); Istio (see [Istio ingress configuration](#istio-ingress-configuration)); NGINX Enterprise, nginx.org/ingress-controller (see [NGINX Enterprise ingress configuration](#nginx-enterprise-ingress-configuration)); NGINX Community, k8s.io/ingress-nginx (see [NGINX Community ingress configuration](#nginx-community-version-ingress-configuration)); Traefik (see [Traefik ingress configuration](#traefik-ingress-configuration))|
|Node requirements| Memory: 5000 MB; CPU: 2 |
|Cluster permissions | Cluster admin permissions |
|Git providers | GitHub, GitHub Enterprise, GitLab Cloud, GitLab Server, Bitbucket Cloud, Bitbucket Server |
|Git access tokens | Git runtime token with a valid expiration date and the following scopes: GitHub and GitHub Enterprise: `repo`, `admin-repo.hook`; GitLab Cloud and GitLab Server: `api`, `read_repository`; Bitbucket Cloud and Server: Permissions: Read, Workspace membership: Read, Webhooks: Read and write, Repositories: Write, Admin |

{::nomarkdown}

                                                            -{:/} - -### Ambassador ingress configuration -For detailed configuration information, see the [Ambassador ingress controller documentation](https://www.getambassador.io/docs/edge-stack/latest/topics/running/ingress-controller){:target="\_blank"}. - -This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. -* Valid external IP address -* Valid TLS certificate -* TCP support - -{::nomarkdown} -
                                                            -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - {::nomarkdown} -
                                                            -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
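{:/}

If you manage the certificate yourself, it typically lives in a Kubernetes TLS secret that your ingress or TLS configuration references. The sketch below is only an illustration of such a secret; the secret name, namespace, and certificate data are placeholders, not values required by Codefresh.

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: my-ingress-tls        # placeholder: any name your ingress/TLS configuration references
  namespace: ambassador       # placeholder: the namespace of your ingress controller
type: kubernetes.io/tls
data:
  tls.crt: <base64-encoded certificate for the ingress host FQDN>   # placeholder
  tls.key: <base64-encoded private key>                             # placeholder
```

{::nomarkdown}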
                                                            -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
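{:/}

As an illustration only: one way to handle TCP with Ambassador is a `TCPMapping` resource, sketched below. The resource name, namespace, port, and target service are placeholders, and the exact `apiVersion` depends on your Ambassador/Emissary version, so adapt it to your installation rather than applying it as-is.

```yaml
apiVersion: getambassador.io/v2
kind: TCPMapping
metadata:
  name: example-tcp-mapping          # placeholder name
  namespace: ambassador              # placeholder: namespace of your Ambassador installation
spec:
  port: 443                          # TCP port Ambassador listens on
  service: my-backend-service:443    # placeholder: target service and port
```

{::nomarkdown}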

{:/}

### AWS ALB ingress configuration

For detailed configuration information, see the [ALB AWS ingress controller documentation](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4){:target="\_blank"}.

This table lists the specific configuration requirements for Codefresh.

{: .table .table-bordered .table-hover}
| What to configure | When to configure |
| -------------- | -------------- |
|Valid external IP address | _Before_ installing hybrid runtime |
|Valid TLS certificate | |
|TCP support| |
|Controller configuration | |
|Alias DNS record in route53 to load balancer | _After_ installing hybrid runtime |
|(Optional) Git integration registration | |

{::nomarkdown}
                                                            -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                                                            -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                                                            -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                                                            -{:/} - -#### Controller configuration -In the ingress resource file, verify that `spec.controller` is configured as `ingress.k8s.aws/alb`. - -```yaml -apiVersion: networking.k8s.io/v1 -kind: IngressClass -metadata: - name: alb -spec: - controller: ingress.k8s.aws/alb -``` - -{::nomarkdown} -
                                                            -{:/} - -#### Create an alias to load balancer in route53 - -> The alias must be configured _after_ installing the hybrid runtime. - -1. Make sure a DNS record is available in the correct hosted zone. -1. _After_ hybrid runtime installation, in Amazon Route 53, create an alias to route traffic to the load balancer that is automatically created during the installation: - * **Record name**: Enter the same record name used in the installation. - * Toggle **Alias** to **ON**. - * From the **Route traffic to** list, select **Alias to Application and Classic Load Balancer**. - * From the list of Regions, select the region. For example, **US East**. - * From the list of load balancers, select the load balancer that was created during installation. - -For more information, see [Creating records by using the Amazon Route 53 console](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-creating.html){:target="\_blank"}. - -{% include image.html - lightbox="true" - file="/images/runtime/post-install-alb-ingress.png" - url="/images/runtime/post-install-alb-ingress.png" - alt="Route 53 record settings for AWS ALB" - caption="Route 53 record settings for AWS ALB" - max-width="60%" -%} - -{::nomarkdown} -
{:/}

#### (Optional) Git integration registration
If the installation failed, as can happen if the DNS record was not created within the timeframe, manually create and register Git integrations using these commands, replacing the placeholders with your runtime name, Git API URL, and Git runtime token:
  `cf integration git add default --runtime <RUNTIME_NAME> --api-url <API_URL>`
  `cf integration git register default --runtime <RUNTIME_NAME> --token <RUNTIME_GIT_TOKEN>`

{::nomarkdown}

{:/}

### Istio ingress configuration
For detailed configuration information, see the [Istio ingress controller documentation](https://istio.io/latest/docs/tasks/traffic-management/ingress/kubernetes-ingress){:target="\_blank"}.

The table below lists the specific configuration requirements for Codefresh.

{: .table .table-bordered .table-hover}
| What to configure | When to configure |
| -------------- | -------------- |
|Valid external IP address |_Before_ installing hybrid runtime |
|Valid TLS certificate| |
|TCP support | |
|Cluster routing service | _After_ installing hybrid runtime |

{::nomarkdown}
                                                            -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                                                            -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                                                            -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                                                            -{:/} - -#### Cluster routing service -> The cluster routing service must be configured _after_ installing the hybrid runtime. - -Based on the runtime version, you need to configure a single or different `VirtualService` resources for these services: - -##### Runtime version 0.0.543 or higher -Configure a single `VirtualService` resource to route traffic to the `app-proxy`, `webhook`, and `workflow` services, as in the example below. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: pov-codefresh-istio-runtime # replace with your runtime name - name: internal-router -spec: - hosts: - - pov-codefresh-istio-runtime.sales-dev.codefresh.io # replace with your host name - gateways: - - istio-system/internal-router # replace with your gateway name - http: - - match: - - uri: - prefix: /webhooks - route: - - destination: - host: internal-router - port: - number: 80 - - match: - - uri: - prefix: /app-proxy - route: - - destination: - host: internal-router - port: - number: 80 - - match: - - uri: - prefix: /workflows - route: - - destination: - host: internal-router - port: - number: 80 -``` - -##### Runtime version 0.0.542 or lower - -Configure two different `VirtualService` resources, one to route traffic to the `app-proxy`, and the second to route traffic to the `webhook` services, as in the examples below. - -{::nomarkdown} -
{:/}

**`VirtualService` example for `app-proxy`:**

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  namespace: test-runtime3 # replace with your runtime name
  name: cap-app-proxy
spec:
  hosts:
  - my.support.cf-cd.com # replace with your host name
  gateways:
  - my-gateway # replace with your gateway name
  http:
  - match:
    - uri:
        prefix: /app-proxy
    route:
    - destination:
        host: cap-app-proxy
        port:
          number: 3017
```
{::nomarkdown}
                                                            -{:/} - -**`VirtualService` example for `webhook`:** - -> Configure a `webhook` for each event defined in the event source. - -```yaml -apiVersion: networking.istio.io/v1alpha3 -kind: VirtualService -metadata: - namespace: test-runtime3 # replace with your runtime name - name: csdp-default-git-source -spec: - hosts: - - my.support.cf-cd.com # replace with your host name - gateways: - - my-gateway # replace with your gateway name - http: - - match: - - uri: - prefix: /webhooks/test-runtime3/push-github # replace `test-runtime3` with your runtime name - route: - - destination: - host: push-github-eventsource-svc - port: - number: 80 -``` - - - -{::nomarkdown} -
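{:/}

Both `VirtualService` examples reference a gateway named `my-gateway`. If you do not already have one, an Istio `Gateway` along the lines of the sketch below can serve as a starting point; the gateway name, namespace, selector, host, and TLS secret are placeholders that must match your Istio installation and ingress host, so treat this as an illustration rather than a required configuration.

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: my-gateway                   # must match the gateway referenced by the VirtualServices
  namespace: test-runtime3           # placeholder: replace with your runtime namespace
spec:
  selector:
    istio: ingressgateway            # placeholder: selects your Istio ingress gateway pods
  servers:
  - port:
      number: 443
      name: https
      protocol: HTTPS
    tls:
      mode: SIMPLE
      credentialName: my-tls-secret  # placeholder: TLS secret with the certificate for the ingress host
    hosts:
    - my.support.cf-cd.com           # placeholder: replace with your host name
```

{::nomarkdown}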

{:/}

### NGINX Enterprise ingress configuration

For detailed configuration information, see the [NGINX ingress controller documentation](https://docs.nginx.com/nginx-ingress-controller){:target="\_blank"}.

The table below lists the specific configuration requirements for Codefresh.

{: .table .table-bordered .table-hover}
| What to configure | When to configure |
| -------------- | -------------- |
|Verify valid external IP address |_Before_ installing hybrid runtime |
|Valid TLS certificate | |
|TCP support| |
|NGINX Ingress: Enable report status to cluster | |
|NGINX Ingress Operator: Enable report status to cluster| |
|Patch certificate secret |_After_ installing hybrid runtime |

{::nomarkdown}
                                                            -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                                                            -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                                                            -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -{::nomarkdown} -
                                                            -{:/} - -#### NGINX Ingress: Enable report status to cluster - -If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing” resulting in a timeout error during installation. - -* Pass `--report-ingress-status` to `deployment`. - -```yaml -spec: - containers: - - args: - - --report-ingress-status -``` - -{::nomarkdown} -
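{:/}

For context, this argument is added to the container `args` of your existing NGINX Ingress controller Deployment; it is not a new resource to create. The fragment below is a sketch only, and the Deployment name, namespace, labels, and image are placeholders that depend on how the controller was installed.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress               # placeholder: your NGINX Ingress controller Deployment
  namespace: nginx-ingress          # placeholder: the namespace it runs in
spec:
  selector:
    matchLabels:
      app: nginx-ingress            # placeholder: existing labels of your controller pods
  template:
    metadata:
      labels:
        app: nginx-ingress          # placeholder: existing labels of your controller pods
    spec:
      containers:
      - name: nginx-ingress
        image: nginx/nginx-ingress:latest   # placeholder: your controller image and tag
        args:
        - --report-ingress-status           # enables status reporting so Argo's health check can pass
```

{::nomarkdown}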
                                                            -{:/} - -#### NGINX Ingress Operator: Enable report status to cluster - -If the ingress controller is not configured to report its status to the cluster, Argo’s health check reports the health status as “progressing” resulting in a timeout error during installation. - -1. Add this to the `Nginxingresscontrollers` resource file: - - ```yaml - ... - spec: - reportIngressStatus: - enable: true - ... - ``` - -1. Make sure you have a certificate secret in the same namespace as the runtime. Copy an existing secret if you don't have one. -You will need to add this to the `ingress-master` when you have completed runtime installation. - -{::nomarkdown} -
{:/}

#### Patch certificate secret
> The certificate secret must be configured _after_ installing the hybrid runtime.

Patch the certificate secret in `spec.tls` of the `ingress-master` resource.
The secret must be in the same namespace as the runtime.

1. Go to the runtime namespace with the NGINX ingress controller.
1. In `ingress-master`, add to `spec.tls`, replacing `<ingress-host>` and `<certificate-secret-name>` with your ingress host and the name of the certificate secret:

   ```yaml
   tls:
    - hosts:
      - <ingress-host>
      secretName: <certificate-secret-name>
   ```

{::nomarkdown}
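{:/}

To illustrate where the patch lands, here is a sketch of an `ingress-master` resource after adding `spec.tls`. Only the `tls` section is the point of the example; the namespace, host, and secret name are placeholders, and the rest of your existing resource (annotations, rules, and so on) stays unchanged and is omitted here.

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-master
  namespace: codefresh                 # placeholder: the runtime namespace
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - my.support.cf-cd.com             # placeholder: your ingress host
    secretName: my-certificate-secret  # placeholder: certificate secret in the runtime namespace
```

{::nomarkdown}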

                                                            -{:/} - -### NGINX Community version ingress configuration - -Codefresh has been tested with and supports implementations of the major providers. For your convenience, we have provided configuration instructions, both for supported and untested providers in [Provider-specific configuration](#provider-specific-configuration). - - -This section lists the specific configuration requirements for Codefresh to be completed _before_ installing the hybrid runtime. -* Verify valid external IP address -* Valid TLS certificate -* TCP support - -{::nomarkdown} -
                                                            -{:/} - -#### Valid external IP address -Run `kubectl get svc -A` to get a list of services, and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname. - -{::nomarkdown} -
                                                            -{:/} - -#### Valid TLS certificate -For secure runtime installation, the ingress controller must have a valid TLS certificate. -> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate. - -{::nomarkdown} -
                                                            -{:/} - -#### TCP support -Configure the ingress controller to handle TCP requests. - -Here's an example of TCP configuration for NGINX Community on AWS. -Verify that the `ingress-nginx-controller` service manifest has either of the following annotations: - -`service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"` -OR -`service.beta.kubernetes.io/aws-load-balancer-type: nlb` - -{::nomarkdown} -
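{:/}

For illustration, the annotation sits on the `ingress-nginx-controller` Service, roughly as sketched below; the namespace, selector labels, and ports are placeholders that should come from your actual ingress-nginx installation rather than from this example.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller
  namespace: ingress-nginx                                  # placeholder: namespace of your ingress-nginx install
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-type: nlb  # or aws-load-balancer-backend-protocol: "tcp"
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/name: ingress-nginx                   # placeholder: selector labels from your install
  ports:
  - name: http
    port: 80
    targetPort: http
    protocol: TCP
  - name: https
    port: 443
    targetPort: https
    protocol: TCP
```

{::nomarkdown}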
                                                            -{:/} - -#### Provider-specific configuration - -> The instructions are valid for `k8s.io/ingress-nginx`, the community version of NGINX. - -
                                                            -AWS -
                                                              -
                                                            1. Apply:
                                                              - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/aws/deploy.yaml -
                                                            2. -
                                                            3. Verify a valid external address exists:
                                                              - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                                                            4. -
                                                            -For additional configuration options, see ingress-nginx documentation for AWS. -
                                                            -
                                                            -Azure (AKS) -
                                                              -
                                                            1. Apply:
                                                              - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml -
                                                            2. -
                                                            3. Verify a valid external address exists:
                                                              - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                                                            4. -
                                                            -For additional configuration options, see ingress-nginx documentation for AKS. - -
                                                            - -
                                                            -Bare Metal Clusters -
                                                              -
                                                            1. Apply:
                                                              - kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml -
                                                            2. -
                                                            3. Verify a valid external address exists:
                                                              - kubectl get svc ingress-nginx-controller -n ingress-nginx -
                                                            4. -
                                                            -Bare-metal clusters often have additional considerations. See Bare-metal ingress-nginx considerations. - -
                                                            - -
-**Digital Ocean**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/do/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Digital Ocean.
-
-**Docker Desktop**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Docker Desktop.
->Note: By default, Docker Desktop services provision with `localhost` as their external address. Triggers in delivery pipelines cannot reach this instance unless they originate from the same machine on which Docker Desktop is running.
-
-**Exoscale**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/exoscale/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Exoscale.
-
-**Google (GKE)**
-
-*Add firewall rules*
-
-GKE by default limits outbound requests from nodes. For the runtime to communicate with the control plane in Codefresh, add a firewall-specific rule.
-
-1. Find your cluster's network:
-   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(network)"`
-1. Get the cluster IPv4 CIDR:
-   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(clusterIpv4Cidr)"`
-1. Replace `[CLUSTER_NAME]`, `[NETWORK]`, and `[CLUSTER_IPV4_CIDR]` with the relevant values, and run:
-   `gcloud compute firewall-rules create "[CLUSTER_NAME]-to-all-vms-on-network" --network="[NETWORK]" --source-ranges="[CLUSTER_IPV4_CIDR]" --allow=tcp,udp,icmp,esp,ah,sctp`
-
-*Use ingress-nginx*
-
-1. Create a `cluster-admin` role binding:
-   `kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user $(gcloud config get-value account)`
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-We recommend reviewing the provider-specific documentation for GKE.
-
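To make the GKE firewall step concrete, here is a filled-in sketch of the same commands with hypothetical values (cluster `my-cluster`, the `default` network, and a `10.4.0.0/14` pod CIDR); substitute your own cluster name and the values returned by the `describe` commands:

```sh
# Look up the network and pod CIDR for the cluster (hypothetical name: my-cluster)
gcloud container clusters describe my-cluster --format="get(network)"
gcloud container clusters describe my-cluster --format="get(clusterIpv4Cidr)"

# Create the firewall rule described above, allowing traffic
# from the cluster's pod range to the VMs on the network
gcloud compute firewall-rules create "my-cluster-to-all-vms-on-network" \
  --network="default" \
  --source-ranges="10.4.0.0/14" \
  --allow=tcp,udp,icmp,esp,ah,sctp
```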
-**MicroK8s**
-
-1. Install using the MicroK8s addon system:
-   `microk8s enable ingress`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-MicroK8s has not been tested with Codefresh, and may require additional configuration. For details, see the Ingress addon documentation.
-
-**MiniKube**
-
-1. Install using the MiniKube addon system:
-   `minikube addons enable ingress`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-MiniKube has not been tested with Codefresh, and may require additional configuration. For details, see the Ingress addon documentation.
-
-**Oracle Cloud Infrastructure**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Oracle Cloud.
-
-**Scaleway**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/scw/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Scaleway.
-
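Across all of the providers above, the verification step is the same: the controller Service must expose an external address. A minimal check, assuming the community controller is installed in the default `ingress-nginx` namespace, that prints only the assigned address entry:

```sh
# Print the external hostname or IP assigned to the ingress controller Service
kubectl get svc ingress-nginx-controller -n ingress-nginx \
  -o jsonpath='{.status.loadBalancer.ingress[0]}'
```

An empty result usually means the load balancer has not finished provisioning yet.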

-### Traefik ingress configuration
-For detailed configuration information, see the [Traefik ingress controller documentation](https://doc.traefik.io/traefik/providers/kubernetes-ingress){:target="\_blank"}.
-
-The table below lists the specific configuration requirements for Codefresh.
-
-{: .table .table-bordered .table-hover}
-| What to configure | When to configure |
-| -------------- | -------------- |
-|Valid external IP address | _Before_ installing hybrid runtime |
-|Valid SSL certificate | |
-|TCP support | |
-|Enable report status to cluster| |
-
-#### Valid external IP address
-Run `kubectl get svc -A` to get a list of services, and verify that the `EXTERNAL-IP` column for your ingress controller shows a valid hostname.
-
-#### Valid TLS certificate
-For secure runtime installation, the ingress controller must have a valid TLS certificate.
-> Use the FQDN (Fully Qualified Domain Name) of the ingress controller for the TLS certificate.
-
-#### TCP support
-Configure the ingress controller to handle TCP requests.
-
-#### Enable report status to cluster
-By default, the Traefik ingress controller is not configured to report its status to the cluster. If not configured, Argo's health check reports the health status as "progressing", resulting in a timeout error during installation.
-
-To enable status reporting, add `publishedService` to `providers.kubernetesIngress.ingressEndpoint`.
-
-The value must be in the format `"<namespace>/<service-name>"`, where `<service-name>` is the Traefik service from which to copy the status.
-
-```yaml
-...
-providers:
-  kubernetesIngress:
-    ingressEndpoint:
-      publishedService: "<namespace>/<service-name>" # Example, "codefresh/traefik-default"
-...
-```
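One way to confirm that the setting took effect, a minimal check assuming the runtime and its ingress resources live in a hypothetical `codefresh` namespace: once Traefik copies the status from the published service, the `ADDRESS` column of the ingress resources should be populated, and the Argo health check can complete.

```sh
# After installation starts, the ingress resources should show an address
kubectl get ingress -n codefresh
# An empty ADDRESS column usually means the status is still not being reported
```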
                                                            -{:/} - -### What to read next -[Hybrid runtime installation flags]({{site.baseurl}}/docs/runtime/installation//#hybrid-runtime-installation-flags) -[Install hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/) diff --git a/_docs/runtime/requirements_orig.md b/_docs/runtime/requirements_orig.md deleted file mode 100644 index 29fad0ee..00000000 --- a/_docs/runtime/requirements_orig.md +++ /dev/null @@ -1,384 +0,0 @@ ---- -title: "Hybrid runtime requirements" -description: "" -group: runtime -toc: true ---- - - -The requirements listed are the **_minimum_** requirements to provision **_hybrid runtimes_** in the Codefresh platform. - -> Hosted runtimes are managed by Codefresh. To provision a hosted runtime as part of Hosted GitOps setup, see [Provision a hosted runtime]({{site.baseurl}}/docs/runtime/hosted-runtime/#1-provision-hosted-runtime) in [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - ->In the documentation, Kubernetes and K8s are used interchangeably. - -### Requirements - -{: .table .table-bordered .table-hover} -| Item | Requirement | -| -------------- | -------------- | -|Kubernetes cluster | Server version 1.18 and higher, without Argo Project components. Tip: To check the server version, run `kubectl version --short`.| -| Ingress controller| Configured on Kubernetes cluster and exposed from the cluster. {::nomarkdown}

-See XREF {:/}|
-|Node requirements| Memory: 5000 MB; CPU: 2 |
-|Runtime namespace | resource permissions|
-| | `ServiceAccount`: Create, Delete |
-| | `ConfigMap`: Create, Update, Delete |
-| | `Service`: Create, Update, Delete |
-| | `Role`: In group `rbac.authorization.k8s.io`: Create, Update, Delete |
-| |`RoleBinding`: In group `rbac.authorization.k8s.io`: Create, Update, Delete |
-| | `persistentvolumeclaims`: Create, Update, Delete |
-| | `pods`: Create, Update, Delete |
-| Git providers | Hosted: GitHub; Hybrid: GitHub, GitLab, Bitbucket Server, GitHub Enterprise |
-| Git access tokens | Runtime Git token with a valid expiration date and `repo` and `admin-repo.hook` scopes; personal user Git token with a valid expiration date and `repo` scope |
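As an illustration of the namespace-scoped permissions listed above, this is a minimal sketch of a `Role` that grants them; the name and namespace (`codefresh-runtime`, `codefresh`) are hypothetical, and the installer normally creates the equivalent objects for you:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: codefresh-runtime   # hypothetical name
  namespace: codefresh      # hypothetical runtime namespace
rules:
  # ServiceAccount: Create, Delete
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["create", "delete"]
  # ConfigMap, Service, PVCs, pods: Create, Update, Delete
  - apiGroups: [""]
    resources: ["configmaps", "services", "persistentvolumeclaims", "pods"]
    verbs: ["create", "update", "delete"]
  # Role, RoleBinding in rbac.authorization.k8s.io: Create, Update, Delete
  - apiGroups: ["rbac.authorization.k8s.io"]
    resources: ["roles", "rolebindings"]
    verbs: ["create", "update", "delete"]
```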
-
-### NGINX EN
-
-#### Valid external IP address
-Run `kubectl get svc -A` to get a list of services, and verify that the EXTERNAL-IP column for your ingress controller shows a valid hostname.
-
-#### Valid SSL certificate
-For secure runtime installation, the ingress controller must have a valid SSL certificate from an authorized CA (Certificate Authority).
-
-#### TCP support
-Configure to handle TCP requests.
-
-Here's an example of TCP configuration for NGINX on AWS.
-Verify that the ingress-nginx-controller service manifest has either of the following annotations:
-
-`service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"`
-OR
-`service.beta.kubernetes.io/aws-load-balancer-type: nlb`
-
-* AWS ALB
-  In the ingress resource file, verify that `spec.controller` is configured as `ingress.k8s.aws/alb`.
-
-  ```yaml
-  apiVersion: networking.k8s.io/v1
-  kind: IngressClass
-  metadata:
-    name: alb
-  spec:
-    controller: ingress.k8s.aws/alb
-  ```
-
-* Report status
-  The ingress controller must be configured to report its status. Otherwise, Argo's health check reports the health status as "progressing", resulting in a timeout error during installation.
-
-  By default, NGINX Enterprise and Traefik ingress are not configured to report status. For details on configuration settings, see the following sections in this article:
-  [NGINX Enterprise ingress configuration](#nginx-enterprise-version-ingress-configuration)
-  [Traefik ingress configuration](#traefik-ingress-configuration)
-
-#### NGINX Enterprise version ingress configuration
-The Enterprise version of NGINX (`nginx.org/ingress-controller`), both with and without the Ingress Operator, must be configured to report the status of the ingress controller.
-
-**Installation with NGINX Ingress**
-* Pass the `-report-ingress-status` argument to `deployment`.
-
-  ```yaml
-  spec:
-    containers:
-      - args:
-        - -report-ingress-status
-  ```
-
-**Installation with NGINX Ingress Operator**
-
-1. Add this to the `Nginxingresscontrollers` resource file:
-
-   ```yaml
-   ...
-   spec:
-     reportIngressStatus:
-       enable: true
-   ...
-   ```
-
-1. Make sure you have a certificate secret in the same namespace as the runtime. Copy an existing secret if you don't have one.
-   You will need to add this to the `ingress-master` when you have completed runtime installation.
-
-#### NGINX Community version provider-specific ingress configuration
-Codefresh has been tested and is supported in major providers. For your convenience, here are provider-specific configuration instructions, both for supported and untested providers.
-
-> The instructions are valid for `k8s.io/ingress-nginx`, the community version of NGINX.
-
-**AWS**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/aws/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for AWS.
-
-**Azure (AKS)**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for AKS.
-
-**Bare Metal Clusters**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/baremetal/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-Bare-metal clusters often have additional considerations. See Bare-metal ingress-nginx considerations.
-
-**Digital Ocean**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/do/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Digital Ocean.
-
-**Docker Desktop**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Docker Desktop.
->Note: By default, Docker Desktop services provision with `localhost` as their external address. Triggers in delivery pipelines cannot reach this instance unless they originate from the same machine on which Docker Desktop is running.
-
-**Exoscale**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/exoscale/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Exoscale.
-
-**Google (GKE)**
-
-*Add firewall rules*
-
-GKE by default limits outbound requests from nodes. For the runtime to communicate with the control plane in Codefresh, add a firewall-specific rule.
-
-1. Find your cluster's network:
-   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(network)"`
-1. Get the cluster IPv4 CIDR:
-   `gcloud container clusters describe [CLUSTER_NAME] --format=get"(clusterIpv4Cidr)"`
-1. Replace `[CLUSTER_NAME]`, `[NETWORK]`, and `[CLUSTER_IPV4_CIDR]` with the relevant values, and run:
-   `gcloud compute firewall-rules create "[CLUSTER_NAME]-to-all-vms-on-network" --network="[NETWORK]" --source-ranges="[CLUSTER_IPV4_CIDR]" --allow=tcp,udp,icmp,esp,ah,sctp`
-
-*Use ingress-nginx*
-
-1. Create a `cluster-admin` role binding:
-   `kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user $(gcloud config get-value account)`
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-We recommend reviewing the provider-specific documentation for GKE.
-
-**MicroK8s**
-
-1. Install using the MicroK8s addon system:
-   `microk8s enable ingress`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-MicroK8s has not been tested with Codefresh, and may require additional configuration. For details, see the Ingress addon documentation.
-
-**MiniKube**
-
-1. Install using the MiniKube addon system:
-   `minikube addons enable ingress`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-MiniKube has not been tested with Codefresh, and may require additional configuration. For details, see the Ingress addon documentation.
-
-**Oracle Cloud Infrastructure**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Oracle Cloud.
-
-**Scaleway**
-
-1. Apply:
-   `kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/scw/deploy.yaml`
-1. Verify a valid external address exists:
-   `kubectl get svc ingress-nginx-controller -n ingress-nginx`
-
-For additional configuration options, see the ingress-nginx documentation for Scaleway.
-
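Independently of the provider, you can confirm that the certificate presented on the ingress host satisfies the SSL requirement above. A minimal check, assuming a hypothetical host `ingress.example.com`; substitute the FQDN configured for your ingress controller:

```sh
# Inspect the certificate the ingress controller presents for the runtime host
openssl s_client -connect ingress.example.com:443 -servername ingress.example.com </dev/null 2>/dev/null \
  | openssl x509 -noout -subject -issuer -dates
```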
                                                            - -#### Traefik ingress configuration -To enable the the Traefik ingress controller to report the status, add `publishedService` to `providers.kubernetesIngress.ingressEndpoint`. - -The value must be in the format `"/"`, where: - `` is the Traefik service from which to copy the status - - ```yaml - ... - providers: - kubernetesIngress: - ingressEndpoint: - publishedService: "/" # Example, "codefresh/traefik-default" ... - ... - ``` - -#### - -#### Runtime namespace permissions for resources - -{: .table .table-bordered .table-hover} -| Resource | Permissions Required| -| -------------- | -------------- | -| `ServiceAccount` | Create, Delete | -| `ConfigMap` | Create, Update, Delete | -| `Service` | Create, Update, Delete | -| `Role` | In group `rbac.authorization.k8s.io`: Create, Update, Delete | -| `RoleBinding` | In group `rbac.authorization.k8s.io`: Create, Update, Delete | -| `persistentvolumeclaims` | Create, Update, Delete | -| `pods` | Creat, Update, Delete | - -### Git repository requirements -This section lists the requirements for Git installation repositories. - -#### Git installation repo -If you are using an existing repo, make sure it is empty. - -#### Git access tokens -Codefresh requires two access tokens, one for runtime installation, and the second, a personal token for each user to authenticate Git-based actions in Codefresh. - -##### Git runtime token -The Git runtime token is mandatory for runtime installation. - -The token must have valid: - * Expiration date: Default is `30 days` - * Scopes: `repo` and `admin-repo.hook` - - {% include - image.html - lightbox="true" - file="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - url="/images/getting-started/quick-start/quick-start-git-event-permissions.png" - alt="Scopes for Git runtime token" - caption="Scopes for Git runtime token" - max-width="30%" - %} - -##### Git user token for Git-based actions -The Git user token is the user's personal token and is unique to every user. It is used to authenticate every Git-based action of the user in Codefresh. You can add the Git user token at any time from the UI. - - The token must have valid: - * Expiration date: Default is `30 days` - * Scope: `repo` - - {% include - image.html - lightbox="true" - file="/images/runtime/git-token-scope-resource-repos.png" - url="/images/runtime/git-token-scope-resource-repos.png" - alt="Scope for Git personal user token" - caption="Scope for Git personal user token" - max-width="30%" - %} - -For detailed information on GitHub tokens, see [Creating a personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token). - - -### What to read next -[Installing hybrid runtimes]({{site.baseurl}}/docs/runtime/installation/) diff --git a/_docs/runtime/runtime-recovery.md b/_docs/runtime/runtime-recovery.md deleted file mode 100644 index a645ee62..00000000 --- a/_docs/runtime/runtime-recovery.md +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "Restore runtimes" -description: "" -group: runtime -toc: true ---- - -In case of cluster failure, restore the runtime from the existing runtime installation repository. For partial or complete cluster failures, you can restore the runtime to either the failed cluster or to a different cluster. Restoring the runtime reinstalls the runtime leveraging the resources in the existing runtime repo. 
- -Restoring the runtime: -* Applies `argo-cd` from the installation manifests in your repo to your cluster -* Associates `argo-cd` with the existing installation repo -* Applies the runtime and `argo-cd` secrets to the cluster -* Updates the runtime config map (`.yaml` in the `bootstrap` directory) with the new cluster configuration for these fields: - `cluster` - `ingressClassName` - `ingressController` - `ingressHost` - - -### How to restore a runtime -Reinstall the runtime from the existing installation repository to restore it to the same or a different cluster. - -**Before you begin** - -* Have the following information handy: - > All values must be the identical to the runtime to be restored. - * Runtime name - * Repository URL - * Codefresh context - * Kube context: Required if you are restoring to the same cluster - -**How to** - -1. Run: - `cf runtime install --from-repo` -1. Provide the relevant values when prompted. -1. If you are performing runtime recovery in a different cluster, verify the ingress resource configuration for `app-proxy`, `workflows`, and `default-git-source`. - If the health status remains as `Progressing`, do the following: - - * In the runtime installation repo, check if the `ingress.yaml` files for the `app-proxy` and `workflows` are configured with the correct `host` and `ingressClassName`: - - `apps/app-proxy/overlays//ingress.yaml` - `apps/workflows/overlays//ingress.yaml` - - * In the Git Source repository, check the `host` and `ingressClassName` in `cdp-default-git-source.ingress.yaml`: - - `resources_/cdp-default-git-source.ingress.yaml` - - See the [example](#ingress-example) below. - -{:start="4"} -1. If you have managed clusters registered to the runtime you are restoring, reconnect them. - Run the command and follow the instructions in the wizard: - `cf cluster add` - -1. Verify that you have a registered Git integration: - `cf integration git list --runtime ` - -1. If needed, create a new Git integration: - `cf integration git add default --runtime --provider github --api-url https://api.github.com` - - - -### Ingress example -This is an example of the `ingress.yaml` for `workflows`. 
- - ```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - annotations: - ingress.kubernetes.io/protocol: https - ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/backend-protocol: https - nginx.ingress.kubernetes.io/rewrite-target: /$2 - creationTimestamp: null - name: runtime-name-workflows-ingress - namespace: runtime-name -spec: - ingressClassName: nginx - rules: - - host: your-ingress-host.com - http: - paths: - - backend: - service: - name: argo-server - port: - number: 2746 - path: /workflows(/|$)(.*) - pathType: ImplementationSpecific -status: - loadBalancer: {} -``` - -### What to read next -[Manage runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/) -[Manage Git Sources]({{site.baseurl}}/docs/runtime/git-sources/) -[Managed clusters]({{site.baseurl}}/docs/runtime/managed-cluster/) \ No newline at end of file diff --git a/_docs/administration/single-sign-on/sso-ldap.md b/_docs/single-sign-on/ldap.md similarity index 97% rename from _docs/administration/single-sign-on/sso-ldap.md rename to _docs/single-sign-on/ldap.md index c3d4369e..e0a11232 100644 --- a/_docs/administration/single-sign-on/sso-ldap.md +++ b/_docs/single-sign-on/ldap.md @@ -36,7 +36,7 @@ Make sure also that you know the scope of the search, that is, where users to se * **Certificate**: The security certificate of the LDAP server for `ldaps` only. Paste the value directly on the field. Do not convert to base64 or any other encoding by hand. Leave the field empty if you use `ldap`. {:start="4"} -1. Select **+ Add**. LDAP users can log in to Codefresh. +1. Select **+ Add**. LDAP users can now log in to Codefresh. >Each user who logs in to Codefresh must: 1. Have a defined email address on the LDAP server diff --git a/_docs/single-sign-on/oidc.md b/_docs/single-sign-on/oidc.md new file mode 100644 index 00000000..79a86e81 --- /dev/null +++ b/_docs/single-sign-on/oidc.md @@ -0,0 +1,66 @@ +--- +title: "Setting up OIDC Federated SSO" +description: "OpenID Connect (OIDC) Single Sign-On (SSO) setup" +group: single-sign-on +toc: true +--- + +Codefresh natively supports login using GitHub, Bitbucket and GitLab using the OpenID Connect (OAuth2) protocol. + + +## Prerequisites + +To successfully add an identity provider (IdP) in Codefresh, you need to do some preparatory work with both Codefresh and the provider: + +1. Inform your IdP that it will provide SSO services to Codefresh +1. Set up Codefresh and point it to your IdP. + +The first procedure differs according to your IdP, but the second one is common to all providers. + +>SSO is only available to Enterprise customers. Please [contact sales](https://codefresh.io/contact-sales/){:target="\_blank"} in order to enable it for your Codefresh account. + +## OIDC SSO configuration in Codefresh + +Here's what you need to do to configure SSO via OIDC in Codefresh: + +1. Configure SSO settings for the IdP: + This generally includes defining settings both in Codefresh and in the IdP. + Codefresh supports OIDC SSO for the following: + * [Auth0]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-auth0/) + * [Azure]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-azure/) + * [Google]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-google/) + * [Okta]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-okta/) + * [OneLogin]({{site.baseurl}}/docs/single-sign-on/oidc/oidc-onelogin/) + +1. Test integration with the IdP: + + >Before enabling SSO for users in Codefresh, you **MUST** make sure that it is working for the test user. 
+ When SSO is enabled for a user, Codefresh allows login only through the SSO for the user and blocks logins through other IdPs. If the selected SSO method does not work for some reason, the is locked out of Codefresh. + + 1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. + 1. In the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. + 1. Add an active user to be used for testing. We recommend you use your own user. + 1. From the **SSO** dropdown, select the provider you want to test. + 1. Keep the current browser session open, and log in via Corporate SSO in an incognito tab (or another browser). + + {% include image.html + lightbox="true" + file="/images/administration/sso/sign-with-sso.png" + url="/images/administration/sso/sign-with-sso.png" + alt="Sign-in with SSO" + caption="Sign-in with SSO" + max-width="50%" + %} + +{:start="2"} +1. (Optional) [Set an IdP as the default provider]({{site.baseurl}}/docs/single-sign-on/team-sync/#set-a-default-sso-provider-for-account) + You can select an IdP as the default SSO provider for a Codefresh account. This means that all the new users added to that account will automatically use the selected IdP for signin. +1. (Optional) [Set the SSO method for each user]({{site.baseurl}}/docs/single-sign-on/team-sync/#select-sso-method-for-individual-users) + You can also select if needed, a different SSO provider for every user or for specific users. + +> Codefresh has an internal cache for SSO configuration, and it can take up to five minutes for your changes to take effect. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) + diff --git a/_docs/administration/single-sign-on/sso-auth0.md b/_docs/single-sign-on/oidc/oidc-auth0.md similarity index 79% rename from _docs/administration/single-sign-on/sso-auth0.md rename to _docs/single-sign-on/oidc/oidc-auth0.md index 5153a3d3..4c052c34 100644 --- a/_docs/administration/single-sign-on/sso-auth0.md +++ b/_docs/single-sign-on/oidc/oidc-auth0.md @@ -1,18 +1,23 @@ --- title: "Auth0" -description: "Set Up Auth0 Single Sign-On (SSO)" -group: administration -sub_group: single-sign-on +description: "Set up Auth0 Single Sign-On (SSO)" +group: single-sign-on +sub_group: oidc redirect_from: - /docs/enterprise/sso-auth0/ - /docs/enterprise/single-sign-on/sso-auth0/ toc: true --- +Set up SSO for Auth0 using OIDC. Auth0 is one of the SSO providers that Codefresh supports for authentication and authorization. -Create an SSO account for Auth0 in Codefresh. Start by creating an Auth0 application, then create the SSO account for Auth0 in Codefresh, and finally define the SSO settings for the application you created in Auth0. -### 1. Create an Auth0 application +Set up OIDC SSO for Auth0 in Codefresh by: +1. Creating an Auth0 application in Auth0 +1. Creating the SSO account for Auth0 in Codefresh +1. Definng the SSO settings for the application you created in Auth0 + +## Step 1: Create an Auth0 application First create a new application in Auth0. 1. Log in to Auth0. @@ -47,12 +52,12 @@ max-width="40%" %} {:start="5"} -1. Continue with _Create SSO account for Auth0 in Codefresh_. +1. Continue with [Step 2: Create SSO account for Auth0 in Codefresh](#step-2-create-sso-account-for-auth0-in-codefresh). -### 2. 
Create SSO account for Auth0 in Codefresh +## Step 2: Create SSO account for Auth0 in Codefresh After creating an Auth0 application, create an SSO account for OAuth0 in Codefresh. -1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on). +1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. 1. Click **Add Single Sign-On**. 1. For the Single Sign-On Service, select **Auth0**, and click **Next**. 1. Define the connection details: @@ -75,10 +80,10 @@ max-width="40%" {:start="5"} 1. Click **Save**. 1. Copy the Client Name that is assigned to identify this SSO account. You will have to add it to the Auth0 application. -1. Continue with _Define SSO settings in Auth0 application_. +1. Continue with [Step 3: Define SSO settings in Auth0 application](#step-3-define-sso-settings-in-auth0-application). -### 3. Define SSO settings in Auth0 application +## Step 3: Define SSO settings in Auth0 application As the final step in Auth0 SSO setup, return to Auth0, and then define the Login URI and Callback URL for the Auth0 application you created in 1. 1. From the sidebar, select **Applications > Applications**. @@ -101,8 +106,9 @@ max-width="50%" You have completed SSO setup for Auth0 in Codefresh. -### Related articles +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) -[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/administration/single-sign-on/) diff --git a/_docs/single-sign-on/oidc/oidc-azure.md b/_docs/single-sign-on/oidc/oidc-azure.md new file mode 100644 index 00000000..54df28d4 --- /dev/null +++ b/_docs/single-sign-on/oidc/oidc-azure.md @@ -0,0 +1,259 @@ +--- +title: "Azure Single Sign-On (SSO)" +description: " " +group: single-sign-on +sub_group: oidc +redirect_from: + - /docs/enterprise/single-sign-on/sso-azure/ +toc: true +--- + +Set up SSO for Azure using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). + +Set up OIDC SSO for Azure in Codefresh by: +1. Registering the Codefresh application in Azure +1. Configuring permissions for the Codefresh application in Azure +1. Creating the Client secret in Azure +1. Completing SSO configuration for Azure in Codefresh +1. Configuring redirect URIs in Azure + + +## Prerequisites + +Make sure that your user in Azure who creates the application is assigned either of these roles: +**Application Administrator** +OR +**Global Administrator** + +If the user who creates the Azure application is not assigned to either of these roles, you will be unable to sync teams from Azure to Codefresh. + + +## Step 1: Register the Codefresh application in Azure + +To setup Azure Active Directory for SSO, first register a new application in Azure. + +1. Log in to the **Azure Portal**, and from the sidebar, select **Azure Active Directory**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/register-app-select-azure-ad.png" +url="/images/sso/azure/register-app-select-azure-ad.png" +alt="Azure Active Directory" +caption="Azure Active Directory" +max-width="70%" +%} + +{:start="2"} +1. From the sidebar, select **App registrations**, and then click **+ New registration**. +1. Enter a name for the application, for example, `Codefresh`, and retain the default values for all other settings. 
+ +{% include image.html +lightbox="true" +file="/images/sso/azure/register-app-name.png" +url="/images/sso/azure/register-app-name.png" +alt="Enter name and register application" +caption="Enter name and register application" +max-width="70%" +%} + +{:start="4"} +1. To apply your changes, click **Register**. The application is created and registered in Azure AD. +1. Continue with [Step 2: Configure permissions for the application in Azure](#step-2-configure-permissions-for-the-application-in-azure). + + +## Step 2: Configure permissions for the application in Azure + +Once the application has been created and registered, configure the required permissions. + +1. Click the name of the application to open **Settings**. +1. Do the following: + * Select **API permissions**, and then click **+ Add a permission**. + * From **Request API Permissions**, select **Microsoft APIs**, and then select **Microsoft Graph**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/config-app-permissions-microsoft-graph.png" +url="/images/sso/azure/config-app-permissions-microsoft-graph.png" +alt="Select Microsoft Graph" +caption="Select Microsoft Graph" +max-width="70%" +%} + +{:start="3"} +1. Click **Application permissions** on the left, and select `Group > Read.All`. + +> The `User.Read.All (Delegated)` permission is added by default. + +{% include image.html +lightbox="true" +file="/images/sso/azure/config-app-permissions-selected.png" +url="/images/sso/azure/config-app-permissions-selected.png" +alt="`Group > Read.All` permissions for Microsoft Graph" +caption="`Group > Read.All` permissions for Microsoft Graph" +max-width="70%" +%} + +{:start="4"} +1. Click **Add Permissions**. +1. Click **Grant admin consent for Default Directory** on the bar. + +{% include image.html +lightbox="true" +file="/images/sso/azure/config-app-permissions-added.png" +url="/images/sso/azure/config-app-permissions-added.png" +alt="Grant admin consent for Default Directory" +caption="Grant admin consent for Default Directory" +max-width="70%" +%} + +{:start="6"} +1. Continue with [Step 3: Create client secret in Azure](#step-3-create-client-secret-in-azure). + + +## Step 3: Create client secret in Azure + +Create a client secret for the application. You will need to provide it when you set up SSO for Azure in Codefresh. + +1. From the sidebar, select **Certificates & secrets**, and then click **+ New client secret**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/client-secret-select-option.png" +url="/images/sso/azure/client-secret-select-option.png" +alt="Create client secret" +caption="Create client secret" +max-width="70%" +%} + +{:start="2"} +1. Optional. Add a meaningful description for the client secret, and either retain the default expiry date or define a custom one. + +{% include image.html +lightbox="true" +file="/images/sso/azure/client-secret-add-description.png" +url="/images/sso/azure/client-secret-add-description.png" +alt="Description for client secret" +caption="Description for client secret" +max-width="70%" +%} + +> Tip: Make a note of the expiry date in your calendar to renew the key before the expiry date and prevent service interruptions. + +{:start="3"} +1. Click **Add**. + **Copy the secret key**, as you will need to provide it on setting up Azure SSO in Codefresh. +1. Continue with [Step 4: Configure SSO settinggs for Azure in Codefresh](#step-4-configure-sso-settings-for-azure-in-codefresh). 
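If you prefer to script the registration and client secret from Steps 1 through 3, the Azure CLI can do roughly the same thing. This is a sketch only: command output formats and flags vary between CLI versions, the display name is hypothetical, `<APPLICATION_ID>` is a placeholder for the `appId` returned by the first command, the API permissions and admin consent are still easiest to grant in the portal, and the returned secret value must be copied immediately.

```sh
# Register the application (hypothetical display name)
az ad app create --display-name "Codefresh"

# Create a client secret for the registration; note that resetting credentials
# may replace existing ones, so check the flags available in your CLI version
az ad app credential reset --id <APPLICATION_ID>
```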
+ +## Step 4: Configure SSO settings for Azure in Codefresh + +Configure SSO for Azure in the Codefresh UI. + +**Before you begin** +* From Azure AD: + * Have your client secret handy + * Go to the application you created, and note down these **Properties: Application ID and Object ID** + + {% include image.html +lightbox="true" +file="/images/sso/azure/azure-properties-object-app-ids.png" +url="/images/sso/azure/azure-properties-object-app-ids.png" +alt="Application and Object IDs in Azure" +caption="Application and Object IDs in Azure" +max-width="70%" +%} + + +**How to** + +1. In the Codefresh UI, select **Account Settings**, and then from the sidebar, select **Single Sign On**. +1. Click **Add Single Sign-On**, and select **Azure AD**. +1. Enter the following: + * **Client Name**: For auto-generation, leave empty. Codefresh generates the client name once you save the settings. + * **Display Name**: Meaningful name for the SSO provider - Shown as display name in Azure (see below) + * **Access token** and **Application ID**: The Application ID from your Enterprise Application Properties in Azure AD. + * **Client Secret**: The key value you copied when you created the client secret in Azure. + * **Tenant**: `mycompany.onmicrosoft.com` or the ID of `0example1-0000-0aa0-a00a-1example0` + * **Object ID**: The Object ID from your Enterprise Application Properties in Azure AD. + * **Auto Sync users and teams to Codefresh**: Select to automatically sync user accounts in Azure AD to your Codefresh account. Optionally, define the time interval, in hours, at which to sync, from 1 to 24. If you don’t specify an interval, the sync is every 12 hours. + + {% include image.html +lightbox="true" +file="/images/sso/azure/sso-codefresh-settings.png" +url="/images/sso/azure/sso-codefresh-settings.png" +alt="SSO settings for Azure in Codefresh" +caption="SSO settings for Azure in Codefres" +max-width="70%" +%} + +{:start="4"} +1. Click **Save**. + If you left the Client Name empty, Codefresh generates one (as in the example below). Codefresh uses this name to identify the SSO configuration. + +{% include image.html +lightbox="true" +file="/images/sso/azure/sso-codefresh-generated-client-id.png" +url="/images/sso/azure/sso-codefresh-generated-client-id.png" +alt="Example of Codefresh-generated Client Name for Azure" +caption="Example of Codefresh-generated Client Name for Azure" +max-width="50%" +%} + + We will need this value in the reply URL setting (back in the Azure portal UI). +1. Continue with [Step 5: Configure redirect URIs in Azure](#step-5-configure-redirect-uris-in-azure). + + +## Step 5: Configure redirect URIs in Azure + +As the final step, add the Codefresh callback URL to the allowed reply URLs for the created application in Azure. + +**Before you begin** +* Make sure you have the Client Name for the Azure SSO configuration from Codefresh + + +**How to** + +1. Go to **Azure Active Directory > Apps registrations**, and select the application you registered for SSO. +1. From the sidebar, select **Authentication**. +1. Below **Platform Configuration**, click **Add a platform** and then select **Web**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/redirect-uri-web-configure.png" +url="/images/sso/azure/redirect-uri-web-configure.png" +alt="Select Web configuration settings" +caption="Select Web configuration settings" +max-width="70%" +%} + +{:start="4"} +1. 
In the Configure Web form, do the following: + * In the **Redirect URIs** field, enter the redirect URI in the format below: + `https://g.codefresh.io/api/auth//callback` + where: + `` is the Client Name shown in the SSO configuration, either defined by you or created by Codefresh. + * Select **ID tokens**. + +{% include image.html +lightbox="true" +file="/images/sso/azure/redirect-rui-define-select-id-tokens.png" +url="/images/sso/azure/redirect-rui-define-select-id-tokens.png" +alt="Web configuration settings" +caption="Web configuration settings" +max-width="70%" +%} + +You have now completed the SSO setup for Azure using OIDC. + +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) diff --git a/_docs/administration/single-sign-on/sso-google.md b/_docs/single-sign-on/oidc/oidc-google.md similarity index 70% rename from _docs/administration/single-sign-on/sso-google.md rename to _docs/single-sign-on/oidc/oidc-google.md index e6d6a6a9..e69e9493 100644 --- a/_docs/administration/single-sign-on/sso-google.md +++ b/_docs/single-sign-on/oidc/oidc-google.md @@ -1,18 +1,23 @@ --- title: "Google Single Sign-On (SSO)" description: "" -group: administration -sub_group: single-sign-on +group: single-sign-on +sub_group: oidc toc: true --- -Setting up SSO for Google in Codefresh requires you to create a client secret for Codefresh in Google, configure SSO settings in Codefresh and then define the redirect URIs, also in Google. -For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). +Set up SSO for Google using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). +Set up OIDC SSO for Google in Codefresh by: +1. Creating the client secret in Google +1. Configuring SSO settings for Google in Codefresh +1. Setting up the redirect URI in Google -### Create Client Secret -1. Log in to [https://console.developers.google.com/](https://console.developers.google.com/). +## Step 1: Create Client Secret in Google + +1. Log in to [https://console.developers.google.com/](https://console.developers.google.com/){:target="\_blank"}. 1. From the sidebar, select **Credentials**. 1. Select **Create Credentials**, and from the drop-down, select **OAuth client ID**. 1. Do the following: @@ -26,9 +31,9 @@ For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/ url="/images/administration/sso/google/googleSSO.png" alt="Creating an OAuth client" caption="Creating an OAuth client" - max-width="30%" + max-width="70%" %} - + * Select **Create**. * From the OAUth client created dialog, note down **Your Client ID** and **Your Client Secret**. 
@@ -38,13 +43,15 @@ For general instructions on SSO setup, see the [overview]({{site.baseurl}}/docs/ url="/images/administration/sso/google/googleSSO2.png" alt="Getting the Client ID and secret" caption="Getting the Client ID and secret" - max-width="30%" + max-width="70%" %} -You will need the Client ID and secret to configure SSO for Google in Codefresh. + You will need the Client ID and secret to configure SSO for Google in Codefresh. +{:start="5"} +1. Continue with [Step 2: Configure SSO settings for Google in Codefresh](#step-2-configure-sso-settings-for-google-in-codefresh). -### Configure SSO for Google in Codefresh +## Step 2: Configure SSO settings for Google in Codefresh 1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. 1. Select **+ Add Single Sign-On**, **Google**, and then **Next**. @@ -80,8 +87,9 @@ You will need the Client ID and secret to configure SSO for Google in Codefresh. {:start="5"} 1. Note down the Client Name, as you need it to set the redirect URI in Google. +1. Continue with [Step 3: Set up Redirect URI in Google](#step-3-set-up-redirect-uri-in-google). -### Set up Redirect URI +### Step 3: Set up Redirect URI in Google 1. Go back to the Google Console Developer dashboard, and click the edit button on the OAuth 2.0 Client IDs that you created before. 1. For **Authorized Redirect URIs**, in the **URIs** field, enter the Client Name you noted down to generate the *Authorized Redirect URIs* * Example Client Name: `t0nlUJoqQlDv` @@ -96,9 +104,9 @@ You will need the Client ID and secret to configure SSO for Google in Codefresh. max-width="30%" %} -This concludes the basic SSO setup for Google. +You have now completed SSO setup for Google via OIDC. -### Synchronize teams via Codefresh CLI +## Synchronize teams via Codefresh CLI For team/group synchronization you also need a service account. In the Codefresh configuration screen there are some optional fields that you can fill, in order to @@ -138,7 +146,16 @@ get team synchronization via the Codefresh CLI. You need to create a service acc * `JSON Keyfile`: The contents of the JSON file * `Admin email`: The user `admin.google.com` -Now you can [synchronize teams using the Codefresh CLI]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. -#### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. 
\ No newline at end of file +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up OIDC Federated SSO]({{site.baseurl}}/docs/single-sign-on/oidc) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/administration/single-sign-on/sso-okta.md b/_docs/single-sign-on/oidc/oidc-okta.md similarity index 69% rename from _docs/administration/single-sign-on/sso-okta.md rename to _docs/single-sign-on/oidc/oidc-okta.md index fdaa0e6a..8e649c2f 100644 --- a/_docs/administration/single-sign-on/sso-okta.md +++ b/_docs/single-sign-on/oidc/oidc-okta.md @@ -1,20 +1,25 @@ --- title: "Okta Single Sign-On (SSO)" description: " " -group: administration -sub_group: single-sign-on +group: single-sign-on +sub_group: oidc redirect_from: - /docs/enterprise/single-sign-on/sso-okta/ toc: true --- +Set up SSO for Okta using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). -In this page we will see the process of setting up Okta SSO with Codefresh. For the general instructions of SSO setup -see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). -### Set up Okta as an Identity provider +Set up OIDC SSO for Okta in Codefresh by: +1. Setting up Okta as an IdP Codefreh in Okta +1. Configuring SSO settings for Okta in Codefresh +1. Configuring URIs in Okta + +## Step 1: Set up Okta as an identity provider 1. Log in to your Okta account, or create an Okta account if you don't have one. -1. On the general Okta dashboard, to open the Okta Admin Dashboard select **Admin**. +1. In the general Okta dashboard, to open the Okta Admin Dashboard, select **Admin**. {% include image.html lightbox="true" @@ -22,7 +27,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image5.png" alt="Okta Dashboard" caption="Okta Dashboard" - max-width="30%" + max-width="70%" %} {:start="3"} @@ -34,7 +39,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image8.png" alt="Okta Applications" caption="Okta Applications" - max-width="30%" + max-width="70%" %} {:start="4"} @@ -46,7 +51,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image9.png" alt="Create new application" caption="Create new application" - max-width="30%" + max-width="70%" %} {:start="5"} @@ -55,21 +60,23 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- * For the **Sign on method**, select **OpenID Connect**. * Select **Create**. - {% include image.html + {% include image.html lightbox="true" file="/images/administration/sso/okta/image1.png" url="/images/administration/sso/okta/image1.png" alt="Choose Sign-on method" caption="Choose Sign-on method" - max-width="30%" + max-width="70%" %} {:start="6"} 1. Configure OIDC integration in **General Settings**: * App name (e.g. Codefresh). * App logo (optional). Feel free to download and add this [picture]({{site.baseurl}}/images/administration/sso/okta/codefresh-logo.png). - * Login redirect URI: `https://g.codefresh.io/api/auth//callback` - where is generated by Codefresh when you configure SSO settings. For now, use a temp value such as `https://g.codefresh.io/api/auth/temp/callback`. 
+ * Login redirect URI: `https://g.codefresh.io/api/auth//callback` + where: + is generated by Codefresh when you configure SSO settings. + For now, use a temp value such as `https://g.codefresh.io/api/auth/temp/callback`. {% include image.html lightbox="true" @@ -77,12 +84,12 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/okta/image4.png" alt="OpenID integration" caption="OpenID integration" - max-width="30%" + max-width="70%" %} * Select **Save**. -### Okta settings needed for SSO in Codefresh -To configure SSO settings for Okta in Codefresh, you meed the Client ID, Client Secret, Access token, and the Codefresh application ID as defined in Okta. +## Configure OIDC SSO settings for Okta in Codefresh +To configure OIDC SSO settings for Okta in Codefresh, you need the Client ID, Client Secret, Access token, and the Codefresh application ID as defined in Okta. Copy the values from the following screens: {% include image.html @@ -91,7 +98,7 @@ file="/images/administration/sso/okta/image7.png" url="/images/administration/sso/okta/image7.png" alt="Client ID and secret" caption="Client ID and secret" -max-width="30%" +max-width="70%" %} {% include image.html @@ -100,7 +107,7 @@ file="/images/administration/sso/okta/image2.png" url="/images/administration/sso/okta/image2.png" alt="Access token" caption="Access token" -max-width="30%" +max-width="70%" %} {% include image.html @@ -109,7 +116,7 @@ file="/images/administration/sso/okta/image3.png" url="/images/administration/sso/okta/image3.png" alt="App ID" caption="App ID" -max-width="30%" +max-width="70%" %} ### Configure SSO for Okta in Codefresh @@ -136,8 +143,12 @@ max-width="30%" Do not copy the URL from the admin view (e.g. `https://-admin.okta.com`), as it will not work. * **Access Token**: Optional. The OKTA API token used to sync groups and their users from OKTA to Codefresh. The token can be generated in OKTA by going to the security tab->API (see above). * **Application ID**: The Codefresh application ID in your OKTA organization, that will be used to sync groups and user from OKTA to Codefresh. This ID can be taken by navigating to your Codefresh APP in OKTA and copy it from the URL (see above). -1. Optional. To automatically sync teams or groups in Okta to Codefresh, set **Auto group sync** to **ON**. This action syncs groups every 12 hours. -1. Select **+Add**. Codefresh automatically generates the **Client Name** to which to identify the SSO configuration. +1. Optional. To automatically sync teams or groups in Okta to Codefresh, set **Auto group sync** to **ON**. + This action syncs groups every 12 hours. + > Though you can assign an Okta application to both groups and individual users, Codefresh _only syncs users who are part of teams_. + New users in Okta, _not_ assigned to a team, are **NOT** synced with Codefresh. You should first assign the user to a team for the sync to work. +1. Select **+Add**. + Codefresh automatically generates the **Client Name** to which to identify the SSO configuration. Note it down. {% include image.html @@ -146,27 +157,26 @@ max-width="30%" url="/images/administration/sso/okta/image6.png" alt="Client name" caption="Client name" - max-width="30%" + max-width="70%" %} -### Configure URIs in Okta +## Configure URIs in Okta 1. 
In the Okta application, go to **General Settings**, and update the following with the client name generated by Codefresh: - * Login redirect URIs - `https://g.codefresh.io/api/auth//callback` - * Initiate login URI - `https://g.codefresh.io/api/auth/` + * Login redirect URIs: `https://g.codefresh.io/api/auth//callback` + * Initiate login URI: `https://g.codefresh.io/api/auth/` You have now completed SSO setup for Okta. -### How Okta syncing works -[Syncing with Okta]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) +## How Okta syncing works +[Syncing with Okta]({{site.baseurl}}/docs/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) only affects teams/groups, and not individual users. -Codefresh only syncs users who are part of teams, though you can assign an Okta application to both groups and individual users. -New users in Okta, _not_ assigned to a team, are **NOT** synced with Codefresh. You should first assign the user to a team for the sync to work. + ### Sync teams after initial SSO setup There are two ways to set up automatic syncing of teams: -* Pipeline running a CLI command: Create a Codefresh pipeline the runs the CLI command `codefresh synchronize teams my-okta-client-name -t okta` as explained in the [pipeline sync page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). +* Pipeline running a CLI command: Create a Codefresh pipeline the runs the CLI command `codefresh synchronize teams my-okta-client-name -t okta` as explained in the [pipeline sync page]({{site.baseurl}}/docs/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup). * Turn on the auto-sync toggle as part of the SSO configuration settings.: {% include image.html lightbox="true" @@ -177,6 +187,7 @@ There are two ways to set up automatic syncing of teams: max-width="50%" %} -### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) diff --git a/_docs/administration/single-sign-on/sso-onelogin.md b/_docs/single-sign-on/oidc/oidc-onelogin.md similarity index 63% rename from _docs/administration/single-sign-on/sso-onelogin.md rename to _docs/single-sign-on/oidc/oidc-onelogin.md index 3e6213f8..bb66f268 100644 --- a/_docs/administration/single-sign-on/sso-onelogin.md +++ b/_docs/single-sign-on/oidc/oidc-onelogin.md @@ -1,23 +1,25 @@ --- title: "OneLogin Single Sign-On (SSO)" description: " " -group: administration -sub_group: single-sign-on +group: single-sign-on +sub_group: oidc redirect_from: - /docs/enterprise/single-sign-on/sso-onelogin/ toc: true --- -In this page we will see the process of setting up OneLogin SSO with Codefresh. For the general instructions of SSO setup -see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/). +Set up SSO for OneLogin using OIDC. +For a general overview on OIDC, see [Setting up OIDC Federated SSO]({site.baseurl}}/docs/single-sign-on/oidc). +Set up OIDC SSO for OneLogin in Codefresh by: +1. Setting up OneLogin as an IdP +1. Configuring SSO settings for OneLogin in Codefresh +1. 
Configuring URIs in Okta -## Set up OneLogin as an Identity provider +## Step 1: Set up OneLogin as an identity provider for Codefresh +Configure the application in the OneLogin dashboard. - -1. Configure app on the OneLogin dashboard: - {:start="1"} - 1. Log in to the [OneLogin Administration Dashboard](https://www.onelogin.com/), and select **Apps > Add Apps**. +1. Log in to the [OneLogin Administration Dashboard](https://www.onelogin.com/){:target="\_blank"}, and select **Apps > Add Apps**. {% include image.html lightbox="true" @@ -25,10 +27,11 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step1.png" alt="OneLogin Dashboard" caption="OneLogin Dashboard" - max-width="30%" + max-width="50%" %} - {:start="2"} - 1. Find **OpenId Connect (OIDC)** app using the search field. + +{:start="2"} +1. Find **OpenId Connect (OIDC)** app using the search field. {% include image.html lightbox="true" @@ -36,10 +39,11 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step2.png" alt="Locating the OpenId Connect App" caption="Locating the OpenId Connect App" - max-width="30%" + max-width="50%" %} - {:start="3"} - 1. Setup a Codefresh application. + +{:start="3"} +1. Set up a Codefresh application. {% include image.html lightbox="true" @@ -47,10 +51,11 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step3.png" alt="Adding a new application" caption="Adding a new application" - max-width="30%" + max-width="50%" %} - {:start="4"} - 1. From the sidebar, select **SSO** and copy the **Client ID** and the **Client Secret**. + +{:start="4"} +1. From the sidebar, select **SSO** and copy the **Client ID** and the **Client Secret**. Set **Application Type** to **Web**, and **Token endpoint Authentication** to **POST**. {% include image.html @@ -59,10 +64,13 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/step4-post.png" alt="Copying the values of Client ID and Secret" caption="Copying the values of Client ID and Secret" - max-width="30%" + max-width="50%" %} -### Configure SSO for OneLogin in Codefresh +{:start="5"} +1. Continue with [Step 2: Configure SSO for OneLogin in Codefresh](#step-2-configure-sso-for-onelogin-in-codefresh). + +## Step 2: Configure SSO for OneLogin in Codefresh 1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. 1. Select **+ Add Single Sign-On** and then select **OneLogin**. @@ -73,7 +81,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- url="/images/administration/sso/onelogin/sso-csdp-onelogin.png" alt="SSO settings for OneLogin in Codefresh" caption="SSO settings for OneLogin in Codefresh" - max-width="30%" + max-width="50%" %} {:start="2"} @@ -83,7 +91,7 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- * **Client ID**: The Client ID you copied from OneLogin. * **Client Secret**: The Client Secret you copied from OneLogin. * **Domain**: Optional. The domain to be used for authentication, only for users who must connect via a custom domain. - * **API CLIENT ID** and **API CLIENT SECRET**: Used for [team sync]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) only. 
For details, see the [official documentation](https://developers.onelogin.com/api-docs/1/getting-started/working-with-api-credentials). + * **API CLIENT ID** and **API CLIENT SECRET**: Used for [team sync]({{site.baseurl}}/docs/single-sign-on/sso-setup-oauth2/#syncing-of-teams-after-initial-sso-setup) only. For details, see the [official documentation](https://developers.onelogin.com/api-docs/1/getting-started/working-with-api-credentials){:target="\_blank"}. {:start="3"} 1. Select **+ Add**. Codefresh generates the client name. Note this down. @@ -97,9 +105,10 @@ see the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso- max-width="100%" %} - +{:start="4"} +1. Continue with [Step 3: Set up login and redirect URIs in OneLogin](#step-3-set-up-login-and-redirect-uris-in-onelogin). -### Set up login and redirect URIs +## Step 3: Set up login and redirect URIs in OneLogin Go back to the OneLogin dashboard. @@ -115,10 +124,11 @@ file="/images/administration/sso/onelogin/step8.png" url="/images/administration/sso/onelogin/step8.png" alt="Login and Redirect URI" caption="Login and Redirect URI" -max-width="30%" +max-width="50%" %} -You have now completed SSO setup for OneLogin. +You have now completed SSO setup for OneLogin via OIDC. -#### What to read next -See the [overview page]({{site.baseurl}}/docs/administration/single-sign-on/sso-setup-oauth2/#testing-your-identity-provider) on how to test the integration, activate SSO for collaborators and create sync jobs. \ No newline at end of file +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/single-sign-on/saml-setup.md b/_docs/single-sign-on/saml-setup.md new file mode 100644 index 00000000..afd68dbe --- /dev/null +++ b/_docs/single-sign-on/saml-setup.md @@ -0,0 +1,141 @@ +--- +title: "Setting up SAML2 Federated SSO" +description: "SAML2 Federated Single Sign-On (SSO) setup" +group: single-sign-on +redirect_from: + - /docs/sso/sso-setup-saml2/ + - /docs/enterprise/single-sign-on/sso-setup-saml2/ +toc: true +--- + +As Identity Providers (IdPs) come in all shapes and sizes, this topic discusses in general what you must do to configure Federated SSO for SAML. + As you will see in the description below, the person in your organization responsible for managing your IdP will need to interact with Codefresh support to successfully set up a trust between your IdP and Codefresh as an SP. + +{:.text-secondary} +## Before you set up Federated SSO + 1. Have your account set up with Codefresh enterprise plan. + 2. Ensure you have a working SAML 2.0 compliant identity provider (IdP). + 3. Identify someone in your organization who is familiar with configuring and managing your organization's IdP. + 4. Ensure that your IdP's system clock is synchronized with a reliable time source. If it's not, tokens generated will be unusable and SSO will fail. 
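+
+A quick way to check that last prerequisite on an IdP host you manage (a sketch only; it assumes a self-hosted IdP on a Linux machine that uses systemd or chrony, which is not something Codefresh requires):
+
+```shell
+# Show whether the system clock is currently synchronized with a time source
+timedatectl status | grep -i "synchronized"
+
+# If the host runs chrony, show the measured offset from the NTP source
+chronyc tracking
+```
+
+If the clock has drifted, the tokens the IdP generates can be rejected as expired or not yet valid, which is why SSO fails.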
+ +{:.text-secondary} +### Summary of Federated SSO setup + +{% include image.html + lightbox="true" + file="/images/sso-flow.png" + url="/images/sso-flow.png" + alt="sso-flow.png" + max-width="100%" +%} + +{:.text-secondary} +### SAML attributes + +Codefresh expects the following user attributes to be passed through SAML between your IdP and Codefresh SP: + - User email address + - User first name + - User last name + - User full name + - User unique ID that isn't subject to change in your identity management environment + +{:.text-secondary} +### How does the connection process work? + + {% include image.html +lightbox="true" +file="/images/sso-diagram.png" +url="/images/sso-diagram.png" +alt="sso-diagram.png" +max-width="100%" + %} + +Once Federated SSO has been configured, the process works as follows: + +
+
+ Steps 2 to 7 occur in the background and are transparent to the user.
+
                                                            + +1. A user logs in to Codefresh and enters the email address. +2. The user is redirected to the Codefresh Service Provider (SP) to initiate SSO. +3. The user’s browser is then redirected to the customer IdP. +4. Once authenticated by the corporate side, a SAML token is sent to the user’s browser. +5. The SAML assertion is then forwarded to Codefresh SP. +6. If you are a valid Codefresh user for this SSO connection, an SSO token is returned to the user’s browser. +7. The user’s browser then returns a token to Codefresh and access is granted for your account. + +## SAML SSO configuration in Codefresh + +Here's what you need to do to configure SSO via SAML in Codefresh: + +1. Configure SSO settings for the IdP in Codefresh: + This generally includes defining settings in both in Codefresh and in the IdP. + Codefresh supports SAML SSO for the following: + * [JumpCloud]({{site.baseurl}}/docs/single-sign-on/saml/saml-jumpcloud) + * [Okta]({{site.baseurl}}/docs/single-sign-on/saml/saml-okta) + * [OneLogin]({{site.baseurl}}/docs/single-sign-on/saml/saml-onelogin) + * [PingID](({{site.baseurl}}/docs/single-sign-on/saml/saml-pingid) + + Notes for SSO via SAML: + **SSO settings** + + * Assertion URL + The Assertion URL which is the Service Provider SSO endpoint, also referred to as the Callback URL or Client ID, is generated _after_ you create the integration. + + * Provider + Currently, we support GSuite for SAML. If you are using a different provider, leave this field empty. + For GSuite, you can define the sync settings, Admin Email and the JSON Keyfile. + For instructions, see [Google SSO]({{site.baseurl}}/docs/single-sign-on/sso-google/#synchronize-teams-with-the-codefresh-cli). + + > These settings are for the SaaS version of Codefresh. For an on-premises setup, use the URLs that match your installation. + +1. Test integration with the IdP + + >Before enabling SSO for users, you **MUST** make sure that it is working for the test user. If SSO is enabled for a user, Codefresh blocks logins through other IDPs for this user and only the enabled SSO is allowed. If the selected SSO method does not work for some reason, the user will be locked out of Codefresh. + + 1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. + 1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. + 1. Add an active user to be used for testing. We recommend you use your own user. + 1. Change Login method by selecting your Auth provider from the SSO drop-down. + + {% include image.html + lightbox="true" + file="/images/administration/sso/collaborators.png" + url="/images/administration/sso/collaborators.png" + alt="Adding collaborators" + caption="Adding collaborators" + max-width="70%" + %} + + {:start="5"} + 1. Keep the current browser session open, and log in via Corporate SSO in an incognito tab (or another browser). + + {% include image.html + lightbox="true" + file="/images/administration/sso/sign-with-sso.png" + url="/images/administration/sso/sign-with-sso.png" + alt="Sign-in with SSO" + caption="Sign-in with SSO" + max-width="50%" + %} + +1. (Optional) [Set an IdP as the default provider]({{site.baseurl}}/docs/single-sign-on/team-sync/#set-a-default-sso-provider-for-account) + You can select an IdP as the default SSO provider for a Codefresh account. 
This means that all the new users added to that account will automatically use the selected IdP for signin. +1. (Optional) [Set the SSO method for each user]({{site.baseurl}}/docs/single-sign-on/team-sync/#select-sso-method-for-individual-users) + You can also select if needed, a different SSO provider for every user or for specific users. + +> Codefresh has an internal cache for SSO configuration, and it can take up to five minutes for your changes to take effect. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on) + + + + + + + + + + diff --git a/_docs/single-sign-on/saml/saml-jumpcloud.md b/_docs/single-sign-on/saml/saml-jumpcloud.md new file mode 100644 index 00000000..bd7d2f73 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-jumpcloud.md @@ -0,0 +1,84 @@ +--- +title: JumpCloud SSO via SAML +description: Set up JumpCloud via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for JumpCloud using SAML. +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Set up SAML SSO for JumpCloud by: +1. Configuring SSO settings for JumpCloud via SAML in Codefresh +1. Configuring SSO settings for Codefresh in JumpCloud +1. Completing SSO configuration for JumpCloud in Codefresh + +## Step 1: Configure SSO settings for JumpCloud via SAML in Codefresh + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. In the sidebar, from Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any arbitrary name for this integration. + * **IDP Entry**: Type in any character. You will enter the correct value from JumpCloud in the final step. + * **Application Certificate**: You will enter the correct value from JumpCloud in the final step. +1. If GSuite is your provider, select it as the **Provider**, and define the settings below. Otherwise leave the field empty. + * **Admin Email**: The email of the user with access to `admin.google.com`. + * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. + * **Sync interval**: Optional. The time interval at which to sync. + * **Sync Field**: Optional. TBD + * **JSON Keyfile**: . TBD +1. Click **Add**. + The SAML integration for JumpCloud is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the JumpCloud SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 2: Configure SSO settings for Codefresh in JumpCloud](#step-2-configure-sso-settings-for-codefresh-in-jumpcloud). + +## Step 2: Configure SSO settings for Codefresh in JumpCloud + + +1. In JumpCloud, go to **User Authentication > SSO**. +1. To configure Codefresh as a new application, click **+**. +1. Select **Custom SAML**. +1. Add a **Display Label** for the application you will create. +1. 
Click the **SSO** tab, and enter the following:
+    1. **IDP Entity ID**: Enter the user-defined or generated Client Name from Codefresh. For example, `gujNGnhXTSmK`.
+      > Make sure there are no spaces before the name when copying and pasting.
+    1. **SP Entity ID**: `g.codefresh.io`.
+    1. **ACS URL**: Enter the Assertion URL (Callback URL) generated in Codefresh.
+    1. **Login URL**: Enter the Assertion URL without the `/callback`.
+    1. **IDP URL**: Add a custom name or leave the default. You will need the value to complete the SSO configuration in Codefresh.
+    1. **Attributes**: Add the following:
+        - **email**: email
+        - **firstName**: firstname
+        - **lastName**: lastname
+    1. Click **Activate** and **Continue**.
+1. When the notification to download the Certificate appears on the top right, download the Certificate.
+1. Continue with [Step 3: Complete SSO configuration for JumpCloud in Codefresh](#step-3-complete-sso-configuration-for-jumpcloud-in-codefresh).
+
+## Step 3: Complete SSO configuration for JumpCloud in Codefresh
+As the final step in configuring SSO for JumpCloud, add the IDP Entry and Certificate values from JumpCloud.
+
+1. **IDP Entry**: The IDP URL from the SSO tab in JumpCloud.
+1. **Application Certificate**: Copy and paste the content between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` from the certificate you downloaded into the field. You can also include the BEGIN and END lines.
+    - **Note**: You will get a warning when editing the Certificate section.
+1. Click **Save**.
+
+You have completed SSO integration for JumpCloud via SAML in Codefresh.
+
+## Test SSO Connection
+
+Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should.
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}.
+1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user.
+1. In a different browser or private/incognito browser window use the Corporate option to log in.
+
+## Related articles
+[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/)
+[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup)
+[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync)
\ No newline at end of file
diff --git a/_docs/single-sign-on/saml/saml-okta.md b/_docs/single-sign-on/saml/saml-okta.md
new file mode 100644
index 00000000..e404c2dd
--- /dev/null
+++ b/_docs/single-sign-on/saml/saml-okta.md
@@ -0,0 +1,84 @@
+---
+title: Okta SSO via SAML
+description: Setting up Okta via SAML
+group: single-sign-on
+sub_group: saml
+toc: true
+---
+
+Set up SSO for Okta using SAML.
+For a general overview on SAML, see [Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup).
+
+>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account.
+
+Setting up SAML SSO for Okta includes:
+1. Configuring SSO settings for Okta via SAML in Codefresh
+1. Configuring SSO settings for Codefresh in Okta
+1. Completing SSO configuration for Okta in Codefresh
+
+## Step 1: Configure SSO settings for Okta via SAML in Codefresh
+Create a SAML account for Okta in Codefresh, to generate the Assertion URL you need when configuring the integration in Okta.
+
+1. 
In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any name you want for the integration. + * **IDP Entry**: Type in any character. You will enter the correct value from Okta in the final step. + * **Application Certificate**: You will enter the correct value from Okta in the final step. +1. Click **Add**. + The SAML integration for Okta is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the Okta SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 2: Configure SSO settings for Codefresh in Okta](#configure-sso-settings-for-codefresh-in-okta). + +## Step 2: Configure SSO settings for Codefresh in Okta + +1. Navigate to **Applications**. +1. Select **Create App Integration > SAML2.0**, and click **Next**. +1. General Settings: + - Fill in the Name and any other settings you need. + - Click **Next**. +1. Configure SAML: + - **Single Sign On URL**: + - **ACS URL**: Enter the Assertion URL (Callback URL) generated in Codefresh. + - **Audience URL**: `g.codefresh.io` + - **Name ID Format**: `EmailAddress` + - Attribute Statements + - Leave **Name Format** as Unspecified + - **firstName**: `user.firstName` + - **lastName**: `user.lastName` + - **email**: `user.email` + - Click **Next**. +1. Feedback: + - If displayed, complete the form. + - Click **Finish**. +1. Sign On Tab: + - Select **View SAML Setup Instructions** on the right. + - Keep the page open as you will need it to complete the setup for Okta in Codefresh. +1. Continue with [Step 3: Configure SSO settings for Codefresh in Okta](#configure-sso-settings-for-codefresh-in-okta). + + +## Step 3: Complete SSO configuration for Okta in Codefresh +Complete SSO setup for Okta via SAML in Codefresh. + +1. **IDP Entry**: The IDP URL from the SSO tab in Okta. +1. **Application Certificate**: Copy and paste the content between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` from the certificate you downloaded into the field. You can also include the BEGIN and END lines. + - **Note**: You will get a warning when editing the Certificate section. +1. Click **Save**. + +You have completed SSO integration for OKta via SAML in Codefresh. + +## Test SSO connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. 
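+
+If the test login fails, a generic way to see what Okta actually sent is to decode the SAML response and check that the attribute names from Step 2 are present. This is a troubleshooting sketch only, not part of the Okta or Codefresh setup; the file name is made up, and the `SAMLResponse` form value copied from the browser's developer tools may need URL-decoding before the base64 decode:
+
+```shell
+# saml-response.txt holds the base64-encoded SAMLResponse value taken from
+# the POST to the Assertion URL (visible in the browser's network tab)
+base64 --decode saml-response.txt > assertion.xml
+
+# Confirm the attribute statements configured in Okta appear in the assertion
+grep -oE 'firstName|lastName|email' assertion.xml | sort -u
+```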
+ +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/single-sign-on/saml/saml-onelogin.md b/_docs/single-sign-on/saml/saml-onelogin.md new file mode 100644 index 00000000..1da7b761 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-onelogin.md @@ -0,0 +1,83 @@ +--- +title: OneLogin via SAML +description: Setting Up OneLogin via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for OneLogin using SAML in Codefresh. +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Setting up SAML SSO for OneLogin includes: +1. Adding the Codefresh application in OneLogin +1. Configuring SSO settings for OneLogin via SAML in Codefresh +1. Configuring SSO settings for Codefresh in OneLogin + +## Step 1: Add Codefresh application in OneLogin + +1. From the OneLogin toolbar, **Applications** section,and then select **Add App** on the top right. +1. Search for **SAML Custom Connector (advanced)** and select it. +1. Add a **Display Name**. Leave the other settings which are optional. +1. Click **Save**. +1. From the sidebar, select **SSO** and keep the tab open. +1. Continue with [Step 2: Configure SSO settings for OneLogin via SAML in Codefresh](#configure-sso-settings-for-onelogin-via-saml-in-codefresh). + +## Step 2: Configure SSO settings for OneLogin via SAML in Codefresh + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any arbitrary name for this integration. + * **IDP Entry**: SAML 2.0 Endpoint (HTTP) from the SSO section in OneLogin. + * **Application Certificate**: X.509 Certificate from the SSO section in OneLogin. + * Click and open **View Details**, preferably in a new tab. + * Under X.509 Certificate, click **Copy**. + * Paste the content into the Application Certificate. + * Remove the lines, `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours. +1. Click **Add**. + The SAML integration for OneLogin is added and appears in the list of SSOs. +1. In the Single Sign-On page, click the **Edit** icon for the OneLogin SAML integration you created. +1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration. +1. Continue with [Step 3: Configure SSO settings for Codefresh in OneLogin](#configure-sso-settings-for-codefresh-in-onelogin). + +## Step 3: Configure SSO settings for Codefresh in OneLogin + +1. Return to OneLogin, and from the sidebar, select **Configuration**. +1. 
Enter the following: + * **Audience** (EntityID): `g.codefresh.io`. + * **Recipient**: The Assertion URL you copied in the previous step. + * **ACS (Consumer) URL Validator**: The Assertion URL in Regex format. For more info on this, view OneLogin's [Setup Page](https://onelogin.service-now.com/support?id=kb_article&sys_id=c89fefdadb2310503de43e043996195a&kb_category=93e869b0db185340d5505eea4b961934){:target="\_blank"}. + * **ACS (Consumer) URL**: The Assertion URL. + * **Login URL**: `https://g.codefresh.io/login` + * **SAML Initiator**: Service Provider. + * Click **Save**. +1. In OneLogin, go to the [Users](https://cfsupport.onelogin.com/users) page, and do the following: + * Select the User. + * Go to **Applications**, and click **+**. + * Select the SAML App with the Display Name you entered in Codefresh. + * Click **Continue**. + * Make sure the **NameID** is set to the email address. + * Click **Save**. + +You have completed SSO integration for OneLogin via SAML. + + + +## Test SSO Connection + +Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user. +1. In a different browser or private/incognito browser window use the Corporate option to log in. + +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) diff --git a/_docs/single-sign-on/saml/saml-pingid.md b/_docs/single-sign-on/saml/saml-pingid.md new file mode 100644 index 00000000..56d46213 --- /dev/null +++ b/_docs/single-sign-on/saml/saml-pingid.md @@ -0,0 +1,89 @@ +--- +title: PingID SSO via SAML +description: Setting up PingID SSO via SAML +group: single-sign-on +sub_group: saml +toc: true +--- + +Set up SSO for PingID using SAML in Codefresh. +> The configuration described here is for PingID SSO and not PingID Federate. The steps can be used as a general guide for Ping Federate. + +For a general overview on SAML, see [Setting up SAML2 Federated SSO]({site.baseurl}}/docs/single-sign-on/saml-setup). + +>If you do not see SAML in the SSO list, please create a support ticket to enable SAML for your account. + +Setting up SAML SSO for PingID includes: +1. Configuring SSO settings for PingID via SAML in Codefresh +1. Configuring SSO settings for Codefresh in PingID +1. Completing SSO configuration for PingID in Codefresh + + +## Step 1: Configure SSO settings for PingID via SAML in Codefresh + +Configure SSO for PingID via SAML in Codefresh. The Assertion URL is automatically generated when you add the integration. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, below Access & Collaboration, select [**Single Sign-On**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}. +1. Click **Add single-sign-on**, select **SAML**, and then click **Next**. +1. Enter the connection details: + * **Display Name**: Any arbitrary name for this integration. 
+  * **IDP Entry**: Type in any character. You will enter the correct value from PingID in the final step.
+  * **Application Certificate**: Type in any character. You will enter the correct value from PingID in the final step.
+  * **Auto Sync users and teams to Codefresh**: Supported for Google/GSuite SAML integration. Select to automatically sync user accounts in to your Codefresh account. Optionally, define the time interval at which to sync, in hours, from 1 to 24. If you don't specify an interval, the sync interval is every 12 hours.
+1. Click **Add**.
+  The SAML integration for PingID is added and appears in the list of SSOs.
+1. In the Single Sign-On page, click the **Edit** icon for the PingID SAML integration you created.
+1. Copy the **Assertion URL** (client ID) that was automatically generated when you added the integration.
+1. Continue with [Step 2: Configure SSO settings for Codefresh in PingID](#step-2-configure-sso-settings-for-codefresh-in-pingid).
+
+
+## Step 2: Configure SSO settings for Codefresh in PingID
+
+
+1. Log in to PingID and select the **Environment**.
+1. Select **Connections > Applications**.
+1. To add Codefresh as a new application, click **+**.
+1. Enter the **Application Name** and **Description**.
+1. Select **SAML Application** and then click **Configure**.
+1. Select **Manually Enter** and define the following:
+    - **ACS URL**: The Assertion URL you copied from Codefresh.
+    - **Entity ID**: `g.codefresh.io`.
+1. Click **Save**.
+1. Go to the **Configuration** tab.
+1. Download the X509 Certificate or Metadata.
+1. Click **Attribute Mappings**, and add the following mappings:
+    - **email**: Email address
+    - **firstName**: Given name
+    - **lastName**: Family name
+
+    > For PingID Federate, you must add the following mapping: NameID <- Email Address
+
+1. Toggle the **Enable** option to on to make the application available.
+1. Continue with [Step 3: Complete SSO configuration for PingID in Codefresh](#step-3-complete-sso-configuration-for-pingid-in-codefresh).
+
+
+## Step 3: Complete SSO configuration for PingID in Codefresh
+As the final step in configuring SSO for PingID, add the IDP Entry and Certificate values from PingID.
+
+1. **IDP Entry**: The IDP URL from the **Configuration** tab in PingID.
+1. **Application Certificate**: Copy and paste the content between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` from the certificate you downloaded into the field. You can also include the BEGIN and END lines. A quick way to inspect the downloaded certificate before pasting it is sketched after the test steps below.
+    - **Note**: You will get a warning when editing the Certificate section.
+1. Click **Save**.
+
+You have completed SSO integration for PingID via SAML in Codefresh.
+
+
+## Test SSO Connection
+
+Now test the SSO with a test user in a different browser or private/incognito browser to make sure the integration works as it should.
+
+1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**.
+1. From the sidebar, below Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/2.0/account-settings/single-sign-on){:target="\_blank"}.
+1. Locate a test user, and from the SSO list, select the integration name to enable SSO for that user.
+1. In a different browser or private/incognito browser window use the Corporate option to log in.
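+
+Before pasting the certificate into Codefresh, you can sanity-check the file you downloaded in Step 2. This is an optional sketch that assumes the certificate is PEM-encoded; the file name is made up:
+
+```shell
+# Print the subject, issuer, and validity window of the downloaded certificate
+openssl x509 -in pingid-signing.pem -noout -subject -issuer -dates
+```
+
+If the validity window shown by `-dates` has already passed, download a current certificate before completing Step 3.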
+ +## Related articles +[Federated Single Sign-On (SSO) overview]({{site.baseurl}}/docs/single-sign-on/) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) +[Common configuration for SSO providers]({{site.baseurl}}/docs/single-sign-on/team-sync) \ No newline at end of file diff --git a/_docs/administration/single-sign-on.md b/_docs/single-sign-on/single-sign-on.md similarity index 75% rename from _docs/administration/single-sign-on.md rename to _docs/single-sign-on/single-sign-on.md index bf75ea99..5df776ca 100644 --- a/_docs/administration/single-sign-on.md +++ b/_docs/single-sign-on/single-sign-on.md @@ -1,20 +1,20 @@ --- title: "Federated Single Sign-On (SSO) overview" description: "" -group: administration +group: single-sign-on redirect_from: - /docs/sso/federated-sso-overview/ - /docs/enterprise/single-sign-on/ toc: true --- - Customers in our **enterprise plan** can log in to Codefresh, using Federated Single Sign-On (SSO). To learn more, please [contact sales](https://codefresh.io/contact-sales/). + Customers in our **enterprise plan** can log in to Codefresh using Federated Single Sign-On (SSO). To learn more, please [contact sales](https://codefresh.io/contact-sales/){:target="\_blank"}. + + Federated identity management enables the cross organizational exchange of identity information across Internet domains, without migrating credential information or consolidating several security domains. With federation, customers can authenticate with their corporate credentials to gain access to Codefresh. - Federated identity management enables the cross organizational exchange of identity information across Internet domains, without migrating credential information or consolidating several security domains. With federation, customers can authenticate with their corporate credentials to gain access to Codefresh. This means that you can sign in to your Codefresh account using your corporate credentials. - To set up Federated SSO, your identity management organization must use either of the following: - - **A Security Assertion Markup Language 2.0 (SAML 2.0)** compliant Identity Provider (IdP), configured to communicate with Codefresh Service Provider (SP). For example, ADFS, Auth0, Okta and Ping Identity. + - **A Security Assertion Markup Language 2.0 (SAML 2.0)** compliant Identity Provider (IdP), configured to communicate with the Codefresh Service Provider (SP). For example, ADFS, Auth0, Okta and Ping Identity. - **OpenID Connect (OAuth 2.0)** identity management. For example, Google, GitHub, Bitbucket and GitLab. @@ -22,8 +22,8 @@ Asserting the identity of the user to Codefresh ensures seamless SSO from a brow A SAML2-based federated system comprises: - - **Identity Provider (IdP)**: The identity provider belongs to the corporation that manages accounts for a large number of users who need secure internet access to the services or Web-based applications of another organization. In our case, it's a customer's organization that requires access to Codefresh. - - The IdP manages the corporate users, and integrates with Identity Management systems in the customer's organization responsible for authentication. The Identity Management systems integrate with authentication providers such as LDAP or AD. + - **Identity Provider (IdP)**: The identity provider belongs to the corporation that manages accounts for a large number of users who need secure internet access to the services or web-based applications of another organization. 
In our case, it's the customer's organization that requires access to Codefresh.
+  - The IdP manages the corporate users, and integrates with Identity Management systems in the customer's organization responsible for authentication. The Identity Management systems integrate with authentication providers such as LDAP or AD (Active Directory).
  - All user authentication is carried out via Identity Management systems integrated with the IdP.
  - For successfully authenticated users, the IdP sends a SAML assertion to the Codefresh service provider that enables the user to access Codefresh.
@@ -33,8 +33,8 @@ A SAML2-based federated system comprises:
 A trust must be set up between the customer IdP and Codefresh as an SP. Once the trust has been set up, and a user has been authenticated via the IdP using corporate credentials, the user can access the Codefresh platform.
-{:.text-secondary}
-### Why use Federated SSO
+
+## Why use Federated SSO
 Using federated SSO significantly simplifies cross-domain user management as follows:
@@ -43,8 +43,6 @@ Using federated SSO significantly simplifies cross-domain user management as fol
 * Corporate credentials aren't exposed to the SaaS provider.
-### What to read next
+## What to read next
 [Setting Up SAML2 Federated Single Sign-On (SSO)](sso-setup-saml2)
-[Setting Up OpenID Connect Federated Single Sign-On](sso-setup-oauth2)
-
-
+[Setting Up OpenID Connect Federated Single Sign-On](sso-setup-oauth2)
diff --git a/_docs/single-sign-on/team-sync.md b/_docs/single-sign-on/team-sync.md
new file mode 100644
index 00000000..87eb0700
--- /dev/null
+++ b/_docs/single-sign-on/team-sync.md
@@ -0,0 +1,166 @@
+---
+title: Common configuration for SSO providers
+description: "Set up team sync, select default SSO provider"
+group: single-sign-on
+toc: true
+---
+
+Once you create an SSO provider account in Codefresh, you can:
+* Automatically or manually sync between the teams created in Codefresh and your Identity Provider (IdP)
+* Set a default SSO provider for your account
+* Select an SSO provider for each user
+
+
+## Syncing teams with IdPs
+Team sync keeps the teams in your Codefresh account aligned with the teams and team members defined in your IdP.
+
+You can sync teams:
+* Automatically, in the Codefresh UI when you set up the SSO account for the IdP, through the **Auto-sync team** option. For details, see the SSO setup for your IdP.
+* Manually, through the [synchronize teams command](https://codefresh-io.github.io/cli/teams/synchronize-teams/) via the [Codefresh CLI](https://codefresh-io.github.io/cli/).
+
+> Team-sync is supported for OIDC providers. For SAML, team-sync is supported only for Google.
+
+
+Example:
+
+To sync your Azure teams, run:
+
+
+```shell
+codefresh synchronize teams -t azure
+```
+where:
+`` is the Client Name/Assertion URL/Callback URL that is automatically generated by Codefresh when you save the SSO configuration for your provider.
+
+
+{% include image.html
+lightbox="true"
+file="/images/administration/sso/azure/client-name.png"
+url="/images/administration/sso/azure/client-name.png"
+alt="SSO Client Name"
+caption="SSO Client Name"
+max-width="40%"
+%}
+
+
+Though you can run this command manually, it makes more sense to run it periodically as a job, and the obvious
+way to do this is with a Codefresh CI pipeline. The CLI can be used as a [freestyle step]({{site.baseurl}}/docs/pipelines/steps/freestyle/).
+ +You can create a git repository with a [codefresh.yml]({{site.baseurl}}/docs/pipelines/what-is-the-codefresh-yaml/) file with the following content: + +```yaml +version: '1.0' +steps: + syncMyTeams: + title: syncTeams + image: codefresh/cli + commands: + - 'codefresh synchronize teams my-client-name -t azure' +``` + +To fully automate this pipeline you should set a [cron trigger]({{site.baseurl}}/docs/pipelines/triggers/cron-triggers/) for it. The cron-trigger will run this pipeline (and therefore synchronize the teams) in a fully automated manner. + +This way you can synchronize your teams every day/week/hour depending on you Cron trigger setup. + +### CLI synchronize teams command + +If the `Restrict inviting additional users by email address domain` is enabled for your account, running the `synchronize teams` command via the CLI, does not invite new users to Codefresh. The output of the command will look similar to the following: + +```json +[ + { + "action": "update", + "teams": [ + { + "team": "developers", + "members": [ + { + "members": [], + "action": "create" + } + ] + }, + { + "team": "DevOps", + "members": [ + { + "members": [], + "action": "create" + } + ] + } + ] + } +] +``` + +**Turn off the domain restriction**: + +1. Navigate to **Account Settings > User & Teams > Security** +1. Toggle off **Restrict inviting additional users by email address domain**. +1. Click **Save**. +1. Rerun the sync command. + +### Sync GitHub Organization Teams to Codefresh + +As an admin, you may want to sync your GitHub Organization Teams with your Codefresh account. At the same time, you do not want to set up an SSO provider and have the users use any login provider they choose. + +The Personal Access Token (PAT) from a user will sync ALL Organizations and ALL Teams to which the user has access. It is recommended to use a "machine" account to access the one organization you need. + +1. Create a PAT that has access to read organizations and teams +1. Install and configure the Codefresh CLI + + `codefresh synchronize teams github -t github --tk $GHTOKEN` + +1. The sync will invite all users except for those that have private email settings turned on. + +Once the initial sync happens, you can set up a cron trigger pipeline to run the command on a schedule. + +## Set a default SSO provider for account + +If you have multiple SSO providers, you can set one of them as the default provider for your account. +Setting a default provider assigns the selected SSO automatically to all new users. The link in the email invitation takes them directly to the login page of that SSO provider. + +1. In the Codefresh UI, go to [Single Sign-On](https://g.codefresh.io/2.0/account-settings/single-sign-on). +1. From the list, select the SSO account to set as default and click the **Edit** icon on the right. +1. Scroll down and select **Set as default**. + + + +## Select SSO method for individual users + +In addition to setting a default provider for your account, you can select a different provider for each user if so required. +* New users + If you have an SSO provider selected as the default, that provider is automatically assigned to new users, added either manually or via team synchronization. + +* Existing users + SSO login is not configured by default for existing users. You must _explicitly select_ the SSO provider for existing users. 
+ If SSO login is already configured for an existing user, and you add a new identity provider, to change the SSO login to the new provider, you must _select_ the new provider for the user. + +1. In the Codefresh UI, on the toolbar, click the **Settings** icon and then select **Account Settings**. +1. From the sidebar, from Access & Collaboration, select [**Users & Teams**](https://g.codefresh.io/account-admin/collaborators/users){:target="\_blank"}. +1. Select the SSO provider from the SSO list. + +{% include image.html +lightbox="true" +file="/images/administration/sso/select-user-sso.png" +url="/images/administration/sso/select-user-sso.png" +alt="Selecting SSO method" +caption="Selecting SSO method" +max-width="50%" +%} + +## Related articles +[Setting up OIDC Federated SSO]({{site.baseurl}}/docs/single-sign-on/oidc) +[Setting up SAML2 Federated SSO]({{site.baseurl}}/docs/single-sign-on/saml-setup) + + + diff --git a/_docs/terms-and-privacy-policy/privacy-policy.md b/_docs/terms-and-privacy-policy/privacy-policy.md deleted file mode 100644 index b61dfc26..00000000 --- a/_docs/terms-and-privacy-policy/privacy-policy.md +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: "Privacy Policy" -description: "" -group: terms-and-privacy-policy -toc: true ---- -**CODEFRESH PRIVACY POLICY ("PRIVACY POLICY")** - -IMPORTANT: BY USING CODEFRESH INC. AND GROUP'S (" **COMPANY**" OR " **WE**") SOLUTION (COLLECTIVELY, THE **"SOLUTION**") YOU (" **YOU**") CONSENT TO THE TERMS AND CONDITIONS OF THIS PRIVACY POLICY AND CONSENT THAT ALL PERSONALLY IDENTIFIABLE INFORMATION ( **"PII**") THAT YOU SUBMIT OR THAT IS PROCESSED OR COLLECTED THROUGH OR IN CONNECTION WITH YOUR USE OF THE SOLUTION MAY BE PROCESSED BY THE COMPANY AND ITS AFFILIATES IN THE MANNER AND FOR THE PURPOSES DESCRIBED IN THE FOLLOWING PRIVACY POLICY. - - "PII" MEANS ANY INFORMATION RELATING TO AN IDENTIFIED OR IDENTIFIABLE NATURAL PERSON; AN IDENTIFIABLE NATURAL PERSON IS ONE WHO CAN BE IDENTIFIED, DIRECTLY OR INDIRECTLY, IN PARTICULAR BY REFERENCE TO AN IDENTIFIER SUCH AS A NAME, AN IDENTIFICATION NUMBER, LOCATION DATA, AN ONLINE IDENTIFIER OR TO ONE OR MORE FACTORS SPECIFIC TO THE PHYSICAL, PHYSIOLOGICAL, GENETIC, MENTAL, ECONOMIC, CULTURAL OR SOCIAL IDENTITY OF THAT NATURAL PERSON. - -YOU ARE NOT LEGALLY REQUIRED TO PROVIDE US WITH PII, HOWEVER, USE OF SOME OF THE FEATURES OF THE SOLUTION REQUIRE THAT YOU PROVIDE PII. IN THESE CASES, IF YOU CHOOSE TO WITHHOLD ANY PII REQUIRED IN RESPECT THEREOF, IT MAY NOT BE POSSIBLE FOR YOU TO ACCESS CERTAIN FEATURES OR USE THE SOLUTION. IF YOU DO NOT AGREE TO THE TERMS AND CONDITIONS SET FORTH HEREIN PLEASE DO NOT USE THE SOLUTION. - -We recognize that privacy is important. This Privacy Policy applies to all of the services, information, tools, features and functionality available on the Solution offered by the Company or its subsidiaries or affiliated companies and covers how PII that the Company collects and receives, including in respect of any use of the Solution, is treated. If you have any questions about this Privacy Policy, please feel free to contact us at: [privacy@codefresh.io](mailto:privacy@codefresh.io). - -Please also read CodeFresh's Term of Service at: [https://codefresh.io/docs/docs/terms-and-privacy-policy/terms-of-service/](https://codefresh.io/docs/docs/terms-and-privacy-policy/terms-of-service/), which describes the terms under which you use our Solution. - -**1. Information We Collect and How We Use It**. 
In order to provide, operate and improve our Solution and provide services in connection therewith, we may collect and process PII, including the following types of information: - 1.1 **Information You Provide. ** When you subscribe to use the Solution we ask you to provide PII, including: full name, email address and phone number. Payment card information is submitted to a third party payment services provider, subject to its terms of use and privacy policy. - 1.2. **Third Parties.** We sometimes supplement the information that you provide with information that is received from third parties. If you log-in to the Solution from and/or interact with any online platform (e.g. Google log-in) we may receive information from the respective platforms. - 1.3. **User Communications.** When you send emails or other communications to the Company, we may retain those communications in order to process your inquiries, respond to your requests and improve our Solution. We may send you newsletters and promotional communications, you may opt-out of this service at any time by visiting the [Email Unsubscribe](http://hs.codefresh.io/hs/manage-preferences/unsubscribe-simple?via_redirect=true) page. - 1.4. **User Information.** We collect and store content that you create, submit, upload or store in the process of using the Solution. When you use the Solution, we may automatically receive and record information from your browser, including without limitation information and statistics about your online/offline status, your IP address, geolocation data (including country and city), device identifiers, internet service provider, connection speed, type of browser, your regional and language settings and software and hardware attributes. Our systems may automatically record and store technical information regarding the method and nature of your use of the Solution. An IP address is a numeric code that identifies your browser on a network, or in this case, the Internet. Your IP address is also used to gather broad demographic information. The Company uses all of the PII that we collect to understand the usage trends and preferences of our users. - 1.5. **Aggregate and Analytical Data.** In the effort to produce insights regarding use of the Solution, we often conduct research on user demographics and behavior based on the PII and other information provided to us. This research may be compiled and analyzed on an aggregate basis, and we may share this aggregate data with its affiliates, agents and business partners. This aggregate information does not identify you personally. We may use such aggregate information for any purpose and also disclose aggregated user statistics in order to describe our services to current and prospective business partners, and to other third parties for other lawful purposes. - -**2. Cookies.** In order to collect the data described herein we may use temporary cookies that remain on your browser for a limited period of time. We may also use persistent cookies that remain on your browser until the Company's Solution are removed, in order to manage and maintain the Solution and record your use of the Solution. Cookies by themselves cannot be used to discover the identity of the user. A cookie is a small piece of information which is sent to and stored on your browser. Cookies do not damage your browser. Most browsers may allow you to block cookies but you may not be able to use some features on the Solution if you block them. 
You may set most browsers to notify you if you receive a cookie (this enables you to decide if you want to accept it or not). We may also use web beacons via the Solution to collect information. Web beacons or "gifs", are electronic images that may be used in our Solution or in our emails. We use Web beacons to deliver cookies, count visits and to tell if an email has been opened and acted upon. - -**3. Links.** Links to other services, sites and applications may be provided by the Company as a convenience to our users. The Company is not responsible for the privacy practices or the content of other sites and applications and you visit them at your own risk. This Privacy Policy applies solely to PII collected by us. - -**4. Children.** If you are a child under the age of 18, you must obtain parental consent prior to using our Solution. The Company will not knowingly contact or engage with children under the age of 18 without said parental consent. If you have reason to believe that a child has provided us with their PII, please contact us at the address given above and we will endeavor to delete that PII from our databases. - -**5. Information Sharing**. As part of providing the Solution our affiliates, agents, representatives and service providers may have access to your PII. We require these parties to process such information in compliance with this Privacy Policy and subject to security and other appropriate confidentiality safeguards. The Company may also share PII in the following circumstances: (a) as required for the provision, maintenance and improvement of the Solution; (b) if we become involved in a reorganization, merger, consolidation, acquisition, or any form of sale of some or all of our assets, with any type of entity, whether public, private, foreign or local; and/or (c) to satisfy applicable law or prevention of fraud or harm or to enforce applicable agreements and/or their terms, including investigation of potential violations thereof. - -**6. Information Security**. We follow generally accepted industry standards to protect against unauthorized access to or unauthorized alteration, disclosure or destruction of PII. However, no method of transmission over the Internet, or method of electronic storage, is 100% secure. Therefore, while we strive to use commercially acceptable means to protect your PII, we cannot guarantee its absolute security. We retain your PII only for as long as reasonably necessary for the purposes for which it was collected or to comply with any applicable legal or ethical reporting or document retention requirements. - -**7. Data Integrity.** The Company processes PII only for the purposes for which it was collected and in accordance with this Privacy Policy or any applicable service agreements. We review our data collection, storage and processing practices to ensure that we only collect, store and process the PII needed to provide or improve our Solution. We take reasonable steps to ensure that the PII we process is accurate, complete, and current, but we depend on our users to update or correct their PII whenever necessary. Nothing in this Privacy Policy is interpreted as an obligation to store information, and we may, at our own discretion, delete or avoid from recording and storing any and all information. - -**8. Your Rights**. - 8.1. **Right of Access and Rectification**. You have the right to know what PII we collect about you and to ensure that such data is accurate and relevant for the purposes for which we collected it. 
We allow our users the option to access and obtain a copy of their PII and to rectify such PII if it is not accurate, complete or updated. However, we may first ask you to provide us certain credentials to permit us to identify your PII. - 8.2. **Right to Delete PII or Restrict Processing**. You have the right to delete your PII or restrict its processing. We may postpone or deny your request if your PII is in current use for the purposes for which it was collected or for other legitimate purposes such as compliance with legal obligations. - 8.3. **Right to Withdraw Consent**. You have the right to withdraw your consent to the processing of your PII. Exercising this right will not affect the lawfulness of processing your PII based on your consent before its withdrawal. - 8.4. **Right of Data Portability**. Where technically feasible, you have the right to ask to transfer your PII in accordance with your right to data portability. - 8.5. **Right to Lodge Complaint**. You also have the right to lodge a complaint with a data protection supervisory authority regarding the processing of your PII. - -You may exercise the above rights by sending a request to [privacy@codefresh.io](mailto:privacy@codefresh.io). - -**9. Enforcement.** The Company regularly reviews its compliance with this Privacy Policy. Please feel free to direct any questions or concerns regarding this Privacy Policy or our treatment of PII by contacting us as provided above. When we receive formal written complaints it is the Company's policy to contact the complaining user regarding his or her concerns. We will cooperate with the appropriate regulatory authorities, including local data protection authorities, to resolve any complaints regarding the transfer of PII that cannot be resolved between the Company and an individual. - -**10. Changes to This Privacy Policy.** The Company may update this Privacy Policy. We will notify you about significant changes in the way we treat PII by sending a notice to the email address provided by you or by placing a prominent notice on the Solution. We encourage you to periodically review this Privacy Policy for the latest information about our privacy practices. - -**11. Consent To Processing.** By providing any PII to us pursuant to this Privacy Policy, all users, including, without limitation, users in the United States, Israel and member states of the European Union, fully understand and unambiguously consent to this Privacy Policy and to the collection and processing of such PII abroad. The server on which the Solution are hosted and/or through which the Solution are processed may be outside the country from which you access the Solution and may be outside your country of residence. 
Some of the uses and disclosures mentioned in this Privacy Policy may involve the transfer of your PII to various countries around the world that may have different levels of privacy protection than your country and may be transferred outside of the European Economic Area ("EEA"). If there is a transfer of your PII outside the EEA we will, in the absence of an EC Adequacy decision relevant to the destination country or to the transfer, seek to rely on appropriate safeguards such as a valid Privacy Shield certification (in the case of a data transfer to a Privacy Shield certified US recipient): [https://www.privacyshield.gov/welcome](https://www.privacyshield.gov/welcome), or enter into appropriate EC approved standard contractual clauses (see [http://ec.europa.eu/justice/data-protection/international-transfers/transfer/index\_en.htm](http://ec.europa.eu/justice/data-protection/international-transfers/transfer/index_en.htm)). By submitting your PII through the Solution, you consent, acknowledge, and agree that we may collect, use, transfer, and disclose your PII as described in this Privacy Policy. - -**12. Questions**. If you have any questions about this Privacy Policy or concerns about the way we process your PII, please contact us at [privacy@codefresh.io](mailto:privacy@codefresh.io). If you wish to delete all information regarding your use of the Solution, please contact us at: [privacy@codefresh.io](mailto:support@codefresh.io). - -Last Date Updated: June 3, 2019 - diff --git a/_docs/terms-and-privacy-policy/sla.md b/_docs/terms-and-privacy-policy/sla.md deleted file mode 100644 index 9661c5ea..00000000 --- a/_docs/terms-and-privacy-policy/sla.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: "Service Commitment" -description: "Codefresh Cloud Service SLA" -group: terms-and-privacy-policy -toc: true ---- - -This Service Level Agreement (“**SLA**”) sets forth the terms and conditions under which Codefresh will provide service levels to Licensee and Users pursuant to the applicable Terms of Service in effect between Codefresh and Licensee and/or Users (the “**Terms**”). - -{::nomarkdown} -SLA Response Times -

                                                            -{:/} - -**1. DEFINITIONS**. The following definitions will apply to this SLA. All capitalized terms not defined in -this SLA will have the meaning given them in the Terms. - - -**1.1. “Downtime”** means any time that Codefresh-hosted portions of the Services are not available for access by Users. - - -**1.2. “Excused Downtime”** means any Downtime that is caused by (i) Maintenance, (ii) Licensee’s or Users’ telecommunications, internet, and network services, (iii) software, hardware, and services not controlled by Codefresh (including but not limited to third-party services used to provide the Services and software or sites that are accessed or linked through the Services), or (iv) any Force Majeure events or other matter beyond Codefresh’s reasonable control. - -**1.3. “Maintenance”** means maintenance to the Codefresh Services: (i) that occurs during the time period that Codefresh or its third-party service providers reserve for scheduled maintenance, which will be non-business hours (i.e. not during 9:00 a.m. to 6:00 p.m. Pacific Time, Monday through Friday, excluding those weekdays on which any federal holidays occurs in the United States) and will -not exceed four (4) hours per month; and (ii) any additional maintenance for which Codefresh provides Client with at least two (2) days advance notice. - - -**1.4. “Uptime”** means any time that is not Downtime. - - -**2. SERVICE AVAILABILITY**. - -**2.1. Target Uptime Percentage**. Codefresh will use commercially reasonable efforts to provide at least 99.5% Uptime in any calendar month (the “Target Uptime Percentage”). - - -**2.2. Achieved Uptime Percentage**. Actual Uptime percentage for each calendar month will be the quotient, expressed as (i) the total minutes potentially available for Uptime for that month minus total minutes of Downtime (other than Excused Downtime), divided by (ii) the total minutes potentially available for Uptime for that month minus any minutes of Excused Downtime (the “Achieved Uptime -Percentage”). - -**2.2. Right to Cancel**. - -{::nomarkdown} - -

                                                            (i)     In the event that the Achieved Uptime Percentage is less than the Target Uptime Percentage in three consecutive calendar months (a “Triggering Event”), Client shall have the right to cancel the agreement (the “Cancellation Right”) by providing Codefresh written notice of such cancellation (a “Cancellation Notice”) within 14 days after the last day of the calendar month in which the Triggering Event arose (the “Triggering Month”). If Client does not provide a Cancellation Notice within 14 days after the last day of the Triggering Month, the Cancellation Right arising with respect to the Triggering Event shall expire.

                                                            - - -

                                                            (ii)      Exercise of a Cancellation Right pursuant to this SLA will be Client’s sole and exclusive remedy, and the entire liability of Codefresh, for any failure to achieve the Target Uptime Percentage or any breach of this SLA. - - -{:/} - -**3. ERROR PRIORITIES AND RESPONSE TIMES**. - -Issues with the Codefresh Platform are classified based on severity of the issues and the required resources needed to resolve them. Codefresh will designate the severity and impact of the reported -issue as defined below and will use commercially reasonable efforts to respond and commence working on the applicable issue in accordance with the time periods below. - -{::nomarkdown} -3.1. SLA Response Times. -

                                                            -{:/} - -| Priority | Silver* | Gold* | Platinum** | -| -------------- | ---------------------------- | -----------------| -----------------| -|Urgent | 4 Hours | 2 Hours | 1 Hour | -| High | 6 Business Hours | 4 Business Hours | 2 Hours | -| Normal | 1 Business Day | 6 Business Hours | 3 Business Hours | -| Low | 1 Business Week | 2 Business Days | 8 Business Hours | - -***Silver and Gold:** 9AM - 5PM PST support. Production down events will be handled with utmost urgency. - -****Platinum:** 24/7 support. Customers with Platinum SLA can open urgent tickets in our Off-hours. - - -**3.2. Priority Definitions**. - -{: .table .table-bordered .table-hover} -| Priority Level | Description | -| -------------- | ---------------------------- | -| Urgent | Previously-working production services or functionality of the services are not available and there is no acceptable work around provided. | -| High | Disabled functionality, errors that result in a lack of significant functionality in the Services which prevent the user from accomplishing their testing with no acceptable work around provided.| -| Normal | Errors that cause previously-working non-critical features to malfunction. | -| Low | General questions, How-To’s, best practices questions, and feature requests.| - -[Severity Examples](https://support.codefresh.io/hc/en-us/articles/360018951039-Codefresh-SLA-definitions) - -**3.3. Support Channels**. - -{: .table .table-bordered .table-hover} -| Channel | Free | Silver | Gold | Platinum| -|----------------|-----------|--------- |------|---------| -| Support Portal | X | X | X | X | -| SLA | | X | X | X | -| Phone* | | | X | X | -| Slack** | | | | X | - -***Phone**: An initial reply by our answering service who will open a support ticket immediately which will be prioritized accordingly. This does include the 'Live support'/co-piloting/on boarding features offered by our Professional Services. - -****Slack:** Not intended to be used for critical issues. - -**4. VERSION HISTORY**. - -{: .table .table-bordered .table-hover} -| Version Number | Nature of Change | Date Approved | -| -------------- | ---------------------------- | ---------------------------- | -|2.0 | General updates | April 20, 2022 | -|1.1 | Added support information | April 7, 2021 | -|1.0 | Initial version | January 17, 2021 | - diff --git a/_docs/terms-and-privacy-policy/terms-of-service.md b/_docs/terms-and-privacy-policy/terms-of-service.md deleted file mode 100644 index 8e1f193c..00000000 --- a/_docs/terms-and-privacy-policy/terms-of-service.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: "" -description: "" -group: terms-and-privacy-policy -redirect_from: - - /docs/terms-of-service/ - - /docs/terms-and-privacy-policy/ -toc: true ---- -{::nomarkdown} - -


                                                            CODEFRESH, INC.
                                                            TERMS OF SERVICE
                                                            - -

                                                            By executing any applicable Order that references these Terms of Service (collectively the “Agreement”), the Parties to the Agreement acknowledge and agree that these binding standard terms and conditions (the “Terms”) shall apply:

                                                            - -

                                                            The following Terms dictate the Agreement between Codefresh, Inc. a Delaware corporation, or the Codefresh -entity set forth in the applicable Order if different, and its respective affiliates, -(collectively, “Codefresh”) and the licensee identified in the Agreement -(“Licensee” or “You”). Your right to access and use the Service, -whether or not an Agreement has been executed between Codefresh and Licensee -(or an entity that Licensee represents), is expressly conditioned on acceptance -of these Terms. By accessing or using the Services provided by Codefresh, -Licensee agrees to be bound by and abide by these Terms. These Terms shall apply -to all use by Licensee and Users of the Service.

                                                            - -
                                                            1. GENERAL TERMS.
                                                            - -

                                                            1.1.    Definitions. Capitalized terms not defined herein shall be given the meaning set forth in -the applicable Order.

                                                            - -

(i)     “Account” means a user account created to access the Codefresh platform.

                                                            - -

(ii)     “Codefresh Content” means data, Documentation, reports, text, images, sounds, video, and content made available through any of the Service.

                                                            - -

(iii)     “Documentation” means the user documentation that Codefresh makes generally available to users at https://codefresh.io/docs/.

                                                            - -

(iv)     “Licensee” means an individual, entity, or other legal person using the Service.

                                                            - -

(v)     “Licensee Content” means all data, software, information, text, audio files, graphic files, content, and other materials that you upload, post, deliver, provide, or otherwise transmit or store in connection with or relating to the Service submitted by or for Licensee to the Service or collected and processed by or for Licensee using the Service, excluding Codefresh Content and Non-Codefresh Applications.

                                                            - -

(vi)     “Malicious Code” means code, files, scripts, agents, or programs intended to do harm, including, for example, viruses, worms, time bombs and Trojan horses.

                                                            - -

(vii)     “Non-Codefresh Application(s)” means a web-based or offline software application that is provided by Licensee or a third party and interoperates with the Service.

                                                            - -

(viii)     “Order” means a Service order form, other ordering document, web-based, or email-based ordering mechanism or registration process for the Service.

                                                            - -

(ix)     “Service” means the Site, including related services provided through the Site, or the Software.

                                                            - -

(x)     “Site” means Codefresh’s website, located at https://support.codefresh.io.

                                                            - -

(xi)     “SLA” means the “service level agreement” in effect as of the Order’s Effective Date, which can be found at https://codefresh.io/docs/docs/terms-and-privacy-policy/sla/.

                                                            - -

(xii)     “Software” means any software developed and made available to Licensee as set forth in an applicable Order, which may include Codefresh’s build, test, and deployment Docker container software tools, services, and related technologies.

                                                            - -

(xiii)     “User” means an individual who is authorized by Licensee to use the Service, for whom Licensee (or Codefresh at Licensee’s request) has supplied a user identification and password either manually or using a Non-Codefresh Application. Users may include Licensee’s employees, consultants, contractors, agents, and third parties that Licensee transacts business with.

                                                            - -

                                                            1.2.    Codefresh may provide its Services to you through the Site or on-premises as set forth in an applicable Order. By entering into an Order or otherwise downloading, accessing, or using the Service, Licensee unconditionally accepts and agrees to, and represents that it has the authority to bind itself and its affiliates -to, all of these Terms.

                                                            - - - -
                                                            2. CLOUD-BASED SERVICES.
                                                            - -

                                                            2.1.    Scope of Service. For all cloud-based Services provided via Codefresh’s remote platform hereunder (such platform, the “Codefresh Cloud”), the “Scope” shall mean both the authorized number of Users and number of Cloud Credits (as defined below) set forth in the applicable Order.

                                                            - -

                                                            2.2.    Cloud Credits. Licensee may purchase Cloud Credits, which allow Users to use the Service on Codefresh Cloud in a specific capacity (each such unit, a “Cloud Credit”). Cloud Credits are available during the Term and expire upon termination. Cloud Credits are not redeemable for cash and are not refundable as cash under any circumstances. Cloud Credits are not transferable and may only be applied to Licensee’s account. Cloud Credits usage will be calculated based on the infrastructure size Licensee uses, as set out in the applicable Order, and as detailed in the below chart:

                                                            - -
| Machine Size | CPU | Memory | Credit/minute |
| ------------ | --- | ------ | ------------- |
| S            | 1   | 1 GB   | 5             |
| M            | 2   | 4 GB   | 10            |
| L            | 4   | 8 GB   | 20            |
| XL           | 8   | 16 GB  | 40            |
| XXL          | 16  | 32 GB  | 80            |
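For illustration only (this sketch is not part of the Terms), the chart above implies a simple linear formula: credits consumed = build minutes × the credit/minute rate for the machine size. A minimal Python sketch, with the rates copied from the chart and a hypothetical 10-minute build as the example input:

```python
# Credit/minute rates copied from the Cloud Credits chart above.
CREDITS_PER_MINUTE = {"S": 5, "M": 10, "L": 20, "XL": 40, "XXL": 80}

def credits_used(machine_size: str, build_minutes: float) -> float:
    """Estimate Cloud Credits consumed: minutes multiplied by the per-minute rate."""
    return CREDITS_PER_MINUTE[machine_size] * build_minutes

# Hypothetical example: a 10-minute build on an M machine (2 CPU / 4 GB)
# consumes 10 x 10 = 100 Cloud Credits.
print(credits_used("M", 10))  # 100
```

Actual billing is governed solely by the applicable Order and Codefresh’s pricing plans; the sketch only restates the per-minute rates shown in the chart.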

                                                            -
                                                            3. ON-PREMISES SERVICES.
                                                            - -

                                                            3.1.    Scope of Services. For all Services provided to Licensee on-premises by Codefresh (the “On-Premises Services”), the “Scope” shall mean the authorized number of Users as set forth in the applicable Order.

                                                            - -

                                                            3.2.    Equipment Maintenance. Licensee shall be responsible for obtaining and maintaining any equipment and ancillary services or tools needed to connect to, access or otherwise use the Service, including, without limitation, modems, hardware, server, software, operating system, networking, web servers, long distance, and local telephone service (collectively, “Equipment”). Licensee shall be responsible for ensuring that such Equipment is compatible with the Service (and, to the extent applicable, the Software) and complies with all configurations and specifications set forth in the Documentation.

                                                            - - -
                                                            4. LICENSE AND RESTRICTIONS; LICENSEE AND USER OBLIGATIONS.
                                                            - -

                                                            4.1.    License. Subject to these Terms and payment of all fees described in an Order, during the Term Codefresh grants Licensee and each User a limited, non-sublicensable, non-exclusive, non-transferable license to use the object code of any Software and Codefresh Content solely in connection with the Service and any terms and procedures Codefresh may prescribe from time to time.

                                                            - -

                                                            4.2.    Restrictions. Subject to these Terms, Licensee and Users may access and use the Service and Codefresh Content only for lawful purposes. All rights, title, and interest in and to the Service and its components, Codefresh Content and all related intellectual property rights will remain with and belong exclusively to Codefresh. Licensee shall maintain the copyright notice and any other notices that appear on the Service on any copies and any media. Neither Licensee nor any User shall directly or indirectly (nor shall they allow any third party to) (i) modify, reverse engineer, or attempt to hack or otherwise discover any source code or underlying code, ideas, or algorithms of the Service (except to the extent that applicable law prohibits reverse engineering restrictions), (ii) sell, resell, license, sublicense, provide, lease, lend, use for timesharing, or service bureau purposes or otherwise use -or allow others to use the Service or Codefresh Content for the benefit of any third party, (iii) use the Service or Codefresh Content, or allow the transfer, transmission, export, or re-export of the Service or Content or portion thereof, in violation of any export control laws or regulations administered by the U.S. Commerce Department, OFAC, or any other government agency, (iv) use -the Service to store or transmit infringing, libelous, or otherwise unlawful or tortious material, or to store or transmit material in violation of third-party privacy or intellectual property rights, (v) use the Service to store or transmit Malicious Code, (vi) interfere with or disrupt the integrity or performance of the Service or its components, (vii) attempt to gain unauthorized access to the Service or its related systems or networks, (viii) permit direct or indirect access to or use of any Service or Codefresh Content in a way that circumvents a contractual usage limit, (ix) copy the Service or any part, feature, function or user interface thereof, access the Service in order to build a competitive product or service, or (x) use the Service for any purpose other than as expressly licensed herein.

                                                            - -

                                                            4.3.    Licensee Service Obligations. Any User of the Service must be thirteen (13) years old or older to use the Service. Licensee shall (i) ensure and be responsible for Users’ compliance with these Terms, (ii) be responsible for the quality and legality of Licensee Content and the means by which Licensee acquired Licensee Content, (iii) use commercially reasonable efforts to prevent unauthorized access to or use of the Service, and notify Codefresh promptly of any such -unauthorized access or use, (iv) use the Service only in accordance with the Codefresh’s Service documentation and applicable laws and government regulations, and (v) comply with terms of service of Non-Codefresh Applications with which Licensee uses the Service. Licensee and Users are responsible for maintaining the security of User’s accounts and passwords. Codefresh cannot and shall not be liable for any loss or damage from Licensee’s or any User’s failure to comply with this security obligation. Licensee and Users may not access the Service, if they are Codefresh’s direct competitor, except with Codefresh’s prior written consent. In addition, Licensee and Users may not access the Service for purposes of monitoring its availability, performance, or functionality, or for any other benchmarking or competitive purposes.

                                                            - -

                                                            4.4.    Enforcement. -Licensee shall promptly notify Codefresh of any suspected or alleged violation of these Terms and shall cooperate with Codefresh with respect to: (i) investigation by Codefresh of any suspected or alleged violation of these Terms and (ii) any action by Codefresh to enforce these Terms. Codefresh may, in its sole discretion, suspend or terminate any User’s access to the Service with or -without written notice to Licensee in the event that Codefresh reasonably determines that a User has violated these Terms. Licensee shall be liable for any violation of these Terms by any User.

                                                            - -

4.5.    Excess Use. Should Licensee use the Service beyond the applicable Scope (“Excess Use”), Codefresh shall invoice Licensee for the Excess Use at Codefresh’s current pricing plans, such that Licensee is billed in accordance with the actual usage of the Service. To verify any Excess Use, and to the extent Licensee uses On-Premises Services, Licensee will maintain, and Codefresh will be entitled to audit, any records relevant to Licensee’s use of the Service hereunder. Codefresh may audit such records on reasonable notice at Codefresh’s cost (or, if the audits reveal material non-compliance with these Terms, at Licensee’s cost), including without limitation, to confirm the number of Users and/or Excess Use.

                                                            - -
                                                            5. TRIAL LICENSE.
                                                            - -

                                                            From time to time, Licensee may be invited to try certain products at no charge for a free trial or evaluation period or if such products are not generally available to licensees (collectively, “Trial License”). Trial Licenses will be designated or identified as beta, pilot, evaluation, trial, or similar. Notwithstanding anything to the contrary herein, Trial Licenses are licensed for Licensee’s internal evaluation purposes only (and not for production use), are provided “as is” without warranty or indemnity of any kind and may be subject to additional terms. Unless otherwise -stated, any Trial Licenses shall expire thirty (30) days from the trial start date. Notwithstanding the foregoing, Codefresh may discontinue Trial Licenses at any time at its sole discretion and may never make any Trial Licenses generally available. Codefresh will have no liability for any harm or damage arising out of or in connection with any Trial Licenses.

                                                            - - - -
                                                            6. PROVISION OF THE SERVICE; SUPPORT.
                                                            - -

                                                            6.1.    Account Generation. As part of the registration process, each User shall generate a username and password for its Account either manually or through a Non-Codefresh Application. Each User is responsible for maintaining the confidentiality of their login, password, and Account and for all activities that occur under any such logins or the Account. Codefresh reserves the right to access Licensee’s and any User’s Account in order to respond to Licensee’s and Users’ requests for technical support. Codefresh has the right, but not the obligation, to monitor the Service, Codefresh Content, or Licensee Content, to the extent CodeFresh has access. Licensee further agrees that Codefresh may remove or disable any Codefresh Content at any time for any reason (including, but not limited to, upon receipt of claims or allegations from third parties or authorities relating to such Codefresh Content), or for no reason at all.

                                                            - -

                                                            6.2.     Accessing the Service. Licensee and its Users may enable or log in to the Service via certain Non-Codefresh Applications, such as GitHub. By logging into or directly integrating these Non-Codefresh Applications into the Service, Codefresh Users’ may have access to additional features and capabilities. To take advantage of such features and capabilities, Codefresh may ask Users to authenticate, register for, or log into Non-Codefresh Applications on the websites of their respective providers. As part of such integration, the Non-Codefresh Applications will provide Codefresh with access to certain information that Users have provided to such Non-Codefresh Applications, and Codefresh will use, store, and disclose such information in accordance with Codefresh’s Privacy Policy located at https://codefresh.com/privacy/. The manner in which Non-Codefresh Applications use, store, and disclose Licensee and User information is governed solely by the policies of the third parties operating the Non-Codefresh Applications, and Codefresh shall have no liability or responsibility for the privacy practices or other actions of any third-party site or service that may be enabled within the Service. In addition, Codefresh is not responsible for the accuracy, availability, or reliability of any information, content, goods, data, opinions, advice, or statements made available in connection with Non-Codefresh Applications. As such, Codefresh shall not be liable for any damage or loss caused or alleged to be caused by or in connection with use of or reliance on any such Non-Codefresh Applications. Codefresh enables these features merely as a convenience and the integration or inclusion of such features does not imply an endorsement or recommendation.

                                                            - -

                                                            6.3.    Support. Codefresh will provide Licensee with maintenance and support services in -accordance with and subject to the SLA. Community-based support is also available via Codefresh’s Discuss site located at https://discuss.codefresh.com (or -successor URL) for the Service at no additional charge. Upgraded support is available if purchased pursuant to an Order.

                                                            - - - -
                                                            7. FEES AND PAYMENT.
                                                            - -

                                                            7.1.    Licensee shall pay Codefresh the fees set forth in an Order in accordance with the terms set forth therein; provided that Codefresh may change any applicable fees upon thirty (30) days’ notice at any time and such new fees shall become effective for any subsequent renewal Term. All payments shall be made in U.S. dollars. -Any payments more than thirty (30) days overdue will bear a late payment fee of one and one-half percent (1.5%) per month or the maximum rate allowed by law, whichever is lower. In addition, Licensee will pay all taxes, shipping, duties, withholdings, and similar expenses, as well as all pre-approved out of pocket expenses incurred by Codefresh in connection with any consulting and/or support -services, promptly upon invoice. If Licensee is paying any fees by credit card, Licensee shall provide Codefresh complete and accurate information regarding the applicable credit card. Licensee represents and warrants that all such information is correct and that Licensee is authorized to use such credit card. Licensee will promptly update its account information with any changes (for -example, a change in billing address or credit card expiration date) that may occur. Licensee hereby authorizes Codefresh to bill such credit card in advance on a periodic basis in accordance with these Terms and the applicable Order, and Licensee further agrees to pay any charges so incurred.

                                                            - -

                                                            7.2.    For any upgrade in a subscription level for a month-to-month service plan, Codefresh shall automatically charge Licensee the new subscription fee, effective as of the date the service upgrade is requested and for each subsequent one-month recurring cycle pursuant to the billing method applicable to Licensee. If Codefresh is providing Licensee the Service pursuant to a yearly service plan, Codefresh will immediately charge Licensee any increase in subscription level plan cost pursuant to the billing method applicable to -Licensee, prorated for the remaining Term of Licensee’s yearly billing cycle; provided, however, any decrease in a subscription level plan cost shall only take effect upon the renewal date of the then current yearly service plan. Licensee’s downgrading its subscription level may cause the loss of features or capacity of Licensee’s Account. Codefresh does not accept any liability for such loss.

                                                            - -

7.3.    If any amount owing by Licensee under these Terms for the Service is thirty (30) or more days overdue (or ten (10) or more days overdue in the case of amounts Licensee has authorized Codefresh to charge to Licensee’s credit card), Codefresh may, in its sole discretion and without limiting its other rights and remedies, suspend Licensee’s and any User’s access to the Service and/or otherwise limit the functionality of the Service until such amounts are paid in full.

                                                            - -

                                                            7.4.    Licensee agrees that its purchases are not contingent on the delivery of any future functionality or features, or dependent on any oral or written public comments made by Codefresh regarding future functionality or features.

                                                            - - - -
                                                            8. TERM; EXPIRATION AND TERMINATION.
                                                            - -

8.1.    These Terms shall continue in effect for the initial term and any renewal term as specified in an Order (collectively, the “Term”). If either party materially breaches these Terms, the other party shall have the right to terminate the applicable Order and, in the event that no Order exists, these Terms (and, in each case, all licenses granted herein) upon thirty (30) days (ten (10) days in the case of non-payment and immediately in the case of a material breach) written notice of any such breach, unless such breach is cured during such notice period. In the case of a free trial or Codefresh otherwise providing the Service at no cost to a Licensee, Codefresh shall have, upon Licensee or any Users failing to use the Service for more than six (6) consecutive months, the right, in its sole discretion, to terminate all User Accounts of Licensee and terminate Licensee’s and all Licensee’s Users’ access to and use of the Service without notice. Upon expiration or termination of an Order or these Terms, Licensee shall immediately be unable to access and use the Service, all Licensee Content may be deleted from the Service at Codefresh’s sole discretion (such information cannot be recovered once Licensee’s Account or any User Account is terminated), and Licensee shall return or destroy all copies of all Codefresh Content and all portions thereof in Licensee’s possession and so certify to Codefresh, if such certification is requested by Codefresh. Any provision of these Terms that, by its nature and context, is intended to survive, including provisions relating to payment of outstanding fees, confidentiality, warranties, and limitation of liability, will survive termination of these Terms.

                                                            - -

                                                            8.2.    Codefresh will promptly terminate without notice the accounts of Users that are determined by Codefresh to be “repeat infringer(s).” A repeat infringer is a User who has been notified of infringing activity more than twice and/or has had Licensee Content or Non-Codefresh Applications removed from the Service more -than twice.

                                                            - -
                                                            9. WARRANTIES; DISCLAIMERS.
                                                            - -

                                                            Codefresh represents and warrants that the Service will function in substantial compliance with the applicable Documentation. In order to be entitled to any remedy based on a purported breach of the foregoing representation and warranty, Licensee must inform Codefresh of the purported deficiency in the Service within thirty (30) days of the day on which Licensee first -becomes aware of the condition giving rise to such claim. EXCEPT AS EXPRESSLY SET FORTH HEREIN, THE SERVICE, SITE, CODEFRESH CONTENT, AND ALL SERVER AND NETWORK COMPONENTS ARE PROVIDED ON AN “AS IS” AND “AS AVAILABLE” BASIS. CODEFRESH EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY, -TITLE, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. LICENSEE AND USERS ACKNOWLEDGE THAT CODEFRESH DOES NOT WARRANT THAT THE SERVICE WILL BE -UNINTERRUPTED, TIMELY, SECURE, ERROR-FREE OR VIRUS-FREE, NOR DOES CODEFRESH MAKE ANY WARRANTY AS TO THE RESULTS THAT MAY BE OBTAINED FROM USE OF THE SERVICE, AND NO INFORMATION, ADVICE, OR SERVICES OBTAINED BY LICENSEE OR USERS FROM CODEFRESH OR THROUGH THE SERVICE SHALL CREATE ANY WARRANTY NOT EXPRESSLY STATED IN THESE TERMS. THE SERVICE MAY BE TEMPORARILY UNAVAILABLE FOR SCHEDULED MAINTENANCE OR FOR UNSCHEDULED EMERGENCY MAINTENANCE, EITHER BY CODEFRESH OR BY THIRD-PARTY PROVIDERS, OR BECAUSE OF CAUSES BEYOND CODEFRESH’S REASONABLE CONTROL. CODEFRESH SHALL USE REASONABLE EFFORTS TO PROVIDE ADVANCE NOTICE OF ANY SCHEDULED SERVICE DISRUPTION.

                                                            - -
                                                            10. INDEMNIFICATION.
                                                            - -

                                                            10.1. Licensee will indemnify, defend and hold harmless Codefresh and its officers, directors, employee and agents, from and against any third-party claims, disputes, demands, liabilities, damages, losses, and costs and expenses, including, without limitation, reasonable legal and professional fees, arising out of or in any way connected with (i) Licensee’s or User’s access to or use of the Service that is in violation of law or this Agreement, or (ii) the Licensee Content as provided to Codefresh that is in violation of law or this Agreement, provided that Codefresh: (a) promptly notifies Licensee in writing of the claim; (b) grants Licensee sole control of the defense and settlement of the claim; and (c) provides Licensee, at Licensee’s expense, with all assistance, information and authority reasonably required for the defense and settlement of the claim.

                                                            - -

                                                            10.2 Codefresh will indemnify, defend and hold harmless Licensee and its officers, directors, employee and agents, from and against any claims, disputes, demands, liabilities, damages, losses, and costs and expenses, including, without limitation, reasonable legal and professional fees, to the extent that it is based upon a third-party claim that the Service, as provided by under this Agreement and used within the scope of this Agreement, infringes or misappropriates any intellectual property right in any jurisdiction, and will pay any costs, damages and reasonable attorneys’ fees attributable to such claim that are awarded against Licensee, provided that Licensee: (i) promptly notifies Codefresh in writing of the claim; (ii) grants Codefresh sole control of the defense and settlement of the claim; and (iii) provides Codefresh, at Codefresh’s expense, with all assistance, information and authority reasonably required for the claim.If use of any of the Codefresh Content and/or the Service is, or in Codefresh’s reasonable opinion is likely to be, the subject of a claim specified in this Section, then Codefresh may, at its sole option and expense: (a) procure for Licensee the right to continue using the Codefresh Content and/or the Service; (b) replace -or modify the Codefresh Content and/or the Service so that it is non-infringing while maintaining substantially equivalent in function to the original Codefresh Content and/or the Service; or (c) if options (a) and (b) above cannot be accomplished despite Codefresh’s reasonable efforts, then Codefresh or Licensee may terminate this Agreement and Codefresh will provide pro rata refund of unused/unapplied fees paid in advance for any applicable subscription term. THE PROVISIONS OF -THIS SECTION 10.2 SET FORTH CODEFRESH’S SOLE AND EXCLUSIVE OBLIGATIONS, AND LICENSEE’S SOLE AND EXCLUSIVE REMEDIES, WITH RESPECT TO INDEMNIFICATION OBLIGATIONS FOR INFRINGEMENT OR MISAPPROPRIATION OF INTELLECTUAL PROPERTY RIGHTS OF ANY KIND. - -

                                                            11. LIMITATION OF LIABILITY.
                                                            - -

EXCEPT FOR A LIABILITY ARISING FROM SECTION 4.2 OR A PARTY’S INDEMNITY OBLIGATIONS SET FORTH IN SECTION 10, EACH PARTY’S LIABILITY UNDER THIS AGREEMENT SHALL BE LIMITED TO THE FEES PAID OR PAYABLE BY LICENSEE TO CODEFRESH IN THE TWELVE (12) MONTHS PRECEDING THE EVENT GIVING RISE TO THE LIABILITY. THE PROVISIONS OF THIS SECTION SHALL APPLY WHETHER OR NOT THE LICENSEE HAS BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGE, AND EVEN IF AN EXCLUSIVE REMEDY SET FORTH HEREIN IS FOUND TO HAVE FAILED OF ITS ESSENTIAL PURPOSE. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY (WHETHER IN CONTRACT, TORT OR OTHERWISE) SHALL EITHER PARTY BE LIABLE TO THE OTHER PARTY, ANY USER, OR ANY THIRD PARTY FOR ANY INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, CONSEQUENTIAL OR PUNITIVE DAMAGES, INCLUDING LOST PROFITS, LOST SALES OR BUSINESS, LOST DATA, GOODWILL, OR OTHER INTANGIBLE LOSSES. THE PARTIES HAVE RELIED ON THESE LIMITATIONS IN DETERMINING WHETHER TO ENTER INTO THESE TERMS. IF APPLICABLE LAW DOES NOT ALLOW FOR CERTAIN LIMITATIONS OF LIABILITY, SUCH AS THE EXCLUSION OF IMPLIED WARRANTIES OR LIMITATION OF LIABILITY FOR INCIDENTAL OR CONSEQUENTIAL DAMAGES, THE PROVISIONS OF THIS SECTION 11 SHALL APPLY TO THE MAXIMUM EXTENT ALLOWABLE UNDER SUCH APPLICABLE LAW.

                                                            - -
                                                            12. PROPRIETARY RIGHTS; LIMITED LICENSES TO CODEFRESH.
                                                            - -

                                                            12.1. Intellectual Property Rights. Subject to the limited licenses expressly granted hereunder, Codefresh and its licensors reserve their respective right, title, and interest in and to the Service, including all of Codefresh’s and its licensors related intellectual property rights related to the Service (the “Intellectual Property Rights”). No rights are granted to Licensee hereunder other than as expressly set forth herein. Codefresh shall retain all right, title, and interest in and to Intellectual Property Rights, including any Software, all improvements, enhancements, modifications, and derivative works thereof, and all related Intellectual Property Rights.

                                                            - -

                                                            12.2.  License to Host Licensee Content. Licensee hereby grants Codefresh a worldwide, non-exclusive, royalty-free, fully paid, sublicensable, limited-term license to host, copy, transmit and display Licensee Content that Licensee or any User posts to the Service, solely as necessary for Codefresh to provide the Service in accordance with these Terms. Subject to the limited licenses granted herein, Codefresh acquires no right, title or interest from Licensee or Licensee’s licensors under these Terms in or to Licensee Content.

                                                            - -

                                                            12.3. License to Use Feedback. Licensee hereby grants to Codefresh a worldwide, perpetual, irrevocable, royalty-free license to use and incorporate into the Service any suggestion, enhancement request, recommendation, correction, or other feedback provided by Licensee or Users relating to the operation of the Service.

                                                            - -
                                                            13. CONFIDENTIALITY.
                                                            - -

                                                            Any technical, financial, business or other information provided by one party (the “Disclosing Party”) to the other party (the “Receiving Party”) and designated as confidential or proprietary or that reasonably should be understood to be confidential given the nature of the information and the circumstances of disclosure (“Confidential Information”) shall be held in confidence and not disclosed and shall not be used except to the extent necessary to carry out the Receiving Party’s obligations or express rights hereunder, except as otherwise authorized by the -Disclosing Party in writing. For clarity, the Service and Codefresh Content shall be deemed Confidential Information of Codefresh whether or not otherwise designated as such. The Receiving Party shall use the same degree of care that it uses to protect the confidentiality of its own confidential information of like kind (but not less than a reasonable standard of care). These obligations -will not apply to information that (i) was previously known by the Receiving Party, as demonstrated by documents or files in existence at the time of disclosure, (ii) is generally and freely publicly available through no fault of the Receiving Party, (iii) the Receiving Party otherwise rightfully obtains from third parties without restriction, or (iv) is independently developed by the -Receiving Party without reference to or reliance on the Disclosing Party’s Confidential Information, as demonstrated by documents or files in existence at the time of disclosure. The Receiving Party may disclose Confidential Information of the Disclosing Party to the extent compelled by law to do so, provided the Receiving Party gives the Disclosing Party prior notice of the -compelled disclosure (to the extent legally permitted) and reasonable assistance, at the Disclosing Party’s cost, if the Disclosing Party wishes to contest the disclosure. If the Receiving Party is compelled by law to disclose the Disclosing Party’s Confidential Information as part of a civil proceeding to which the Disclosing Party is a party, and the Disclosing Party is not contesting the disclosure, the Disclosing Party will reimburse the Receiving Party for its reasonable cost of compiling and providing secure access to that Confidential Information. In the event that such protective order or other remedy is not obtained, the Receiving Party shall furnish only that portion of the Confidential Information that is legally required and use commercially reasonable efforts to obtain assurance that confidential treatment will be accorded the Confidential Information.

                                                            - -
                                                            14. PROTECTION OF CONFIDENTIAL LICENSEE CONTENT; SECURITY.
                                                            - -

                                                            14.1. Codefresh shall maintain administrative, physical, and technical safeguards for protection of the security, confidentiality and integrity of Licensee Content that is Licensee’s Confidential Information (“Confidential Licensee Content”). Those safeguards shall include, but will not be limited to, measures for preventing access, use, modification, or disclosure of Confidential Licensee Content by Codefresh’s personnel except (a) to provide the Service and prevent or address service or technical problems, (b) as compelled by law in accordance with Section 13 (Confidentiality) above, or (c) as Licensee expressly permits in writing.

                                                            - -

                                                            14.2. Licensee understands that the operation of the Service, including Licensee Content, may involve (i) transmissions over various networks; (ii) changes to conform and adapt to technical requirements of connecting networks or devices; and (iii) transmission to Codefresh’s third-party vendors and hosting partners solely to -provide the necessary hardware, software, networking, storage, and related technology required to operate and maintain the Service. Accordingly, Licensee -acknowledges that Licensee bears sole responsibility for adequate backup of Licensee Content. Codefresh will have no liability to Licensee for any unauthorized access or use of any of Licensee Content, or any corruption, deletion, destruction, or loss of any of Licensee Content.

                                                            - -
                                                            15. DISPUTE RESOLUTION.
                                                            - -

                                                            15.1. This Agreement and any action related thereto will be governed by the laws of the State of California without regard to its conflict of laws provisions. Licensee and Codefresh irrevocably consent to the jurisdiction of, and venue in, the state or federal courts located in the County of San Francisco, California for any disputes arising -under this Agreement, provided that the foregoing submission to jurisdiction and venue shall in no way limit the obligation to arbitrate disputes set forth in Section 15.2.

                                                            - -

                                                            15.2. Except for actions to protect a party’s intellectual property rights and to enforce an arbitrator’s decision hereunder, any controversy or claim arising out of or relating to this Agreement, or the breach thereof, shall be settled by arbitration administered by the American Arbitration Association (“AAA”) under its Commercial Arbitration Rules, or such applicable substantially equivalent rules as the AAA may adopt that are then in effect (the “AAA Rules”), and judgment on the award rendered by the arbitrator(s) may be -entered in any court having jurisdiction thereof. There shall be one arbitrator, and such arbitrator shall be chosen by mutual agreement of the parties in accordance with AAA Rules. The arbitration shall be conducted remotely to the extent practicable and otherwise in San Francisco, California. The arbitrator shall apply the laws of the State of California to all issues in dispute. The -controversy or claim shall be arbitrated on an individual basis and shall not be consolidated in any arbitration with any claim or controversy of any other party. The findings of the arbitrator shall be final and binding on the parties and may be entered in any court of competent jurisdiction for enforcement. Enforcements of any award or judgment shall be governed by the Federal Arbitration Act.

                                                            - -
                                                            16. MISCELLANEOUS.
                                                            - -

                                                            16.1. Assignment. Neither party may assign this Agreement without the other party’s prior written consent and any attempt to do so will be void, except that either party may assign this Agreement, without the other party’s consent, to a successor or acquirer, as the case may be, in connection with a merger, acquisition, sale of all or substantially all of such party’s assets or substantially similar transaction, provided, however, that Licensee may not assign this Agreement to a competitor or customer of Codefresh without Codefresh’s prior written consent. Subject to the foregoing, this Agreement will bind and benefit the parties and their respective successors and assigns.

                                                            - -

                                                            16.2. Electronic Signature. The parties consent to using electronic signatures to sign this Agreement and to be legally bound by their electronic signatures. The parties acknowledge that their electronic signatures will have the same legal force and effect as handwritten signatures.

                                                            - -

                                                            16.3. Fees. In any action between the parties seeking enforcement of any of the terms and provisions of this Agreement, the prevailing party in such action shall be awarded, in addition to damages, injunctive or other relief, its reasonable costs and expenses, not limited to taxable costs, reasonable attorney’s fees, expert fees, and court fees and expenses.

                                                            - -

                                                            16.4. No Partnership or Joint Venture. The Agreement is not intended to be, and shall not be construed as, an agreement to form a partnership, agency relationship, or a joint venture between the parties. Except as otherwise specifically provided in the Agreement, neither party shall be authorized to act as an agent of or otherwise to represent the other party.

                                                            - -

                                                            16.5. Headings. Captions to, and headings of, the articles, sections, subsections, paragraphs, or subparagraphs of this Agreement are solely for the convenience of the parties, are not a part of this Agreement, and shall not be used for the interpretation or determination of the validity of this Agreement or any provision hereof.

                                                            - -

                                                            16.6. Publicity. Licensee grants Codefresh the right to use Licensee’s company name and logo as a reference for marketing or promotional purposes on Codefresh’s website and in other public or private communications with its existing or potential customers, subject to Licensee’s standard trademark usage guidelines as provided to Codefresh from time to time.

                                                            - -

                                                            16.7. No Election of Remedies. Except as expressly set forth in this Agreement, the exercise by either party of any of its remedies under this Agreement will not be deemed an election of remedies and will be without prejudice to its other remedies under this Agreement or available at law or in equity or otherwise.

                                                            - -

                                                            16.8. Notices. All notices required or permitted under this Agreement will be in writing, will reference this Agreement, and will be deemed given: (i) when delivered personally; (ii) one (1) business day after deposit with a nationally-recognized express courier, with written confirmation of receipt; (iii) three (3) business days after having been sent by registered or certified mail, return receipt requested, postage prepaid; or (iv) twenty-four (24) hours after having been sent via e-mail to the contact person at the address listed in each Order (or if to Codefresh, at legal@codefresh.io) unless a party notifies the other party in writing of a change to the contact person and/or the contact person’s contact information. Email shall not be sufficient for notices of termination or an indemnifiable claim. All such notices will be sent to the addresses set forth above or to such other address as may be specified by either party to the other party in accordance with this Section.

                                                            - -

                                                            16.9. Waiver & Severability. The failure by either party to enforce any provision of this Agreement will not constitute a waiver of future enforcement of that or any other provision. The waiver of any such right or provision will be effective only if in writing and signed by a duly authorized representative of each party. If any provision of this Agreement is held invalid or unenforceable by a court of competent jurisdiction, the remaining provisions of this Agreement will remain in full force and effect, and the provision affected will be construed so as to be enforceable to the maximum extent permissible by law.

                                                            - -

                                                            16.10. Entire Agreement. This Agreement, together with the SLA and any subsequently executed Order(s), constitutes the complete and exclusive agreement of the parties with respect to its subject matter and supersedes all prior understandings and agreements, whether written or oral, with respect to its subject matter. Any waiver, modification, or amendment of any provision of this Agreement will be effective only if in writing and signed by the parties hereto.

                                                            - -

                                                            16.11. Force Majeure. Neither party will be responsible for any failure or delay in its performance under this Agreement due to causes beyond its reasonable control, including, but not limited to, labor disputes, strikes, lockouts, shortages of or inability to obtain labor, energy, raw materials or supplies, war, acts of terror, riot, acts of God or governmental action.

                                                            - -

                                                            16.12. Counterparts. This Agreement may be executed in counterparts, each of which will be deemed an original, but all of which together will constitute one and the same instrument.

                                                            - -

                                                            16.13.  Updating Terms. As its business evolves, Codefresh may change these Terms (not including any then-current, active Orders) from time to time. Licensee may review the most current version of these Terms at any time by visiting https://codefresh.io/docs/docs/terms-and-privacy-policy/terms-of-service/ and by visiting the most current versions of the other pages that are referenced in the Agreement. All changes will become effective upon posting of the change. If Client (or any User) accesses or uses the Services after the effective date, that use will constitute Client’s acceptance of any revised terms and conditions. Codefresh may change these Terms from time to time by providing Licensee and Users at least thirty (30) days’ notice either by emailing the email address associated with Licensee’s or User’s account or by posting a notice on the Service.

                                                            - -

                                                             

                                                            - -

                                                            Terms last updated May 12, 2022

                                                            -{:/} - diff --git a/_docs/troubleshooting/common-issues.md b/_docs/troubleshooting/common-issues.md deleted file mode 100644 index 525a88da..00000000 --- a/_docs/troubleshooting/common-issues.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "Common issues" -description: "" -group: troubleshooting -redirect_from: - - /docs/common-issues/ - - /docs/troubleshooting/ -toc: true ---- -In this section, we offer helpful information about issues you may encounter and questions you might have. - -## General usage issues - -- [Non admin users can are thrown out back to classic Codefresh]({{site.baseurl}}/docs/troubleshooting/common-issues/non-admin-users-support/) - - - - - - - diff --git a/_docs/troubleshooting/common-issues/non-admin-users-support.md b/_docs/troubleshooting/common-issues/non-admin-users-support.md deleted file mode 100644 index 91f8e648..00000000 --- a/_docs/troubleshooting/common-issues/non-admin-users-support.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: "Non admin users can are thrown out back to classic Codefresh" -description: "" -group: troubleshooting -sub_group: common-issues -toc: true ---- - -For the time being only admin users of your account will be able to use Codefresh Argo Platform. - -The reason for this is that the roles and permissions model has not yet been released and thus all users are able to view all entities. - -The roles and permissions model will be released before going into GA. diff --git a/_docs/troubleshooting/runtime-issues.md b/_docs/troubleshooting/runtime-issues.md deleted file mode 100644 index 31b2a2c0..00000000 --- a/_docs/troubleshooting/runtime-issues.md +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: "Runtimes" -description: "" -group: troubleshooting -toc: true ---- - - -#### Pre-installation checks or runtime collision check failed - -**Possible cause** - You have Argo Project components from other installations in your cluster. - -**Possible actions** - 1. Uninstall the Codefresh runtime. - 1. Remove the Argo Project components from your cluster. - 1. Reinstall the Codefresh runtime. - -#### Pre-installation error: please upgrade to the latest cli version: `v` - -**Possible cause** - Codefresh has a new CLI version. - -**Possible actions** -Run the appropriate command to upgrade to the latest version: - - {: .table .table-bordered .table-hover} -| Download mode | OS | Command | -| -------------- | ----------| ----------| -| `Curl` | MacOS-x64 | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-amd64.tar.gz | tar zx && mv ./cf-darwin-amd64 /usr/local/bin/cf && cf version`| -| | MacOS-m1 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-darwin-arm64.tar.gz | tar zx && mv ./cf-darwin-arm64 /usr/local/bin/cf && cf version` | -| | Linux - X64 |`curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-amd64.tar.gz | tar zx && mv ./cf-linux-amd64 /usr/local/bin/cf && cf version` | -| | Linux - ARM | `curl -L --output - https://github.com/codefresh-io/cli-v2/releases/latest/download/cf-linux-arm64.tar.gz | tar zx && mv ./cf-linux-arm64 /usr/local/bin/cf && cf version`| -| `Brew` | N/A| `brew tap codefresh-io/cli && brew install cf2`| - -#### Failed to bootstrap repository: authentication required -**Possible cause** -The Git token provided for runtime installation is not valid. - -**Possible actions** -* Make sure the token: - * Has a valid expiration date. 
- * Scope includes `repo` and `admin-repo.hook` - -#### Failed adding git integration <...> 404 not found - -**Possible cause** -Another process is probably occupying the address you provided as your `ingress-host`. - -**Possible actions** -* Terminate the process to free up the address. - -#### Failed to create default git integration:...failed making a graphql API call...the HTTP request failed - -**Possible cause** - The Ingress controller does not have a valid SSL certificate. The certificate must be from a Certificate Authority. - -**Possible actions** -1. Get a valid CA-signed certificate for the Ingress controller. -1. Run the runtime installation. - -_OR_ - -* Continue with the runtime installation in `insecure` mode: - - `cf intg git add default --api-url https://api.github.com --runtime --insecure` - where: - `` is the name of the runtime installation. - -#### Failed to create default git integration:...GraphQL Error (Code: 401): ("response":{"error":"Unauthorized: Invalid token."status":401, "headers":{}}..) -**Possible cause** - The ingress host specified does not point to the cluster selected for runtime installation. - -**Possible actions** -Make sure the ingress host points to the cluster on which the runtime is installed. - - -#### Timeout error, either for applications or while waiting for the installation to complete -**Possible cause** - `Cron-executer` has identified that not all your applications are synced and healthy. - Codefresh runtime installation syncs app status at 10-minute intervals. The timeout occurs if after the last status sync, at least one application is either not synced or healthy. - -**Possible actions** - -Without terminating the runtime install, do the following: - -1. In the Argo CD UI, check the status of your apps: - * Port forward your `argo-cd server` pod using default 8080 ports. - * Access it via `localhost:8080` with the Username `admin`, and Password which is the decoded `argocd-initial-admin-secret` in your cluster. - > With `k9s`, in the `secrets` view, press `x`. -1. If all apps are perfectly synced and healthy, check the logs of the `cron-executor` service. -1. If the logs don't show a cause for the error, try freeing up resources: - * Free up memory with `docker system prune -a --volumes`. - * Increase the resources allocated to your cluster. \ No newline at end of file diff --git a/_docs/whats-new/whats-new.md b/_docs/whats-new/whats-new.md deleted file mode 100644 index aa22dce0..00000000 --- a/_docs/whats-new/whats-new.md +++ /dev/null @@ -1,1347 +0,0 @@ ---- -title: "What's new in Codefresh?" -description: "" -group: whats-new -redirect_from: - - /docs/whats-new/ -toc: true ---- - -Built on Argo, the world’s most popular and fastest-growing open source software delivery, Codefresh unlocks the full enterprise potential of Argo Workflows, Argo CD, Argo Events, and Argo Rollouts, providing a control-plane for managing them at scale. - - - -## October 2022 - -### Features & enhancements -{::nomarkdown} -
                                                            -{:/} - -#### Kubernetes version runtime support -We now support Kubernetes server versions 1.21 and higher. - -{::nomarkdown} -
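To confirm that a cluster meets this requirement before installing or upgrading a runtime, you can check the server version with kubectl. A minimal sketch; only the reported Server Version matters here:

```shell
# Print client and server versions for the current kubeconfig context.
# The "Server Version" line must report v1.21 or higher.
kubectl version
```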
                                                            -{:/} - -#### Request Routing Service for runtimes -We have changed the routing mechanism for hybrid runtimes. URL requests and webhooks are now routed through a new internal routing service instead of through the ingress controller. - -The change is effective from runtime version 0.0.543 and higher. If you already have runtimes installed, this change requires no action from you, whether you upgrade to the new runtime version or retain your existing runtimes. Older runtimes continue to use the ingress controller for routing purposes. - -See [Hybrid runtime architecture]({{site.baseurl}}/docs/getting-started/architecture/#hybrid-runtime-architecture) and [Request Routing Service]({{site.baseurl}}/docs/getting-started/architecture/#request-routing-service). - -{::nomarkdown} -<br>
                                                            -{:/} - -#### More Git providers for runtimes -Codefresh runtimes now support GitHub Enterprise, GitLab, and Bitbucket as Git providers, apart from GitHub, which is the default. - -When installing the first hybrid or hosted runtime for your account, you can define the Git provider of choice. Because Codefresh creates a configuration repository that is shared with subsequent runtimes in the same account, you cannot change the Git provider for a different runtime in the same account. - -Each Git provider requires runtime tokens with specific scopes and has specific installation requirements. Once installed, you can authorize access to the Git provider through OAuth or a personal access token. - -Note that GitLab cloud is not supported for hosted runtimes. - -See [Git provider and repo flags]({{site.baseurl}}/docs/runtime/installation/#git-provider-and-repo-flags). - -{::nomarkdown} -
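For illustration only, installing an account's first runtime against a non-default Git provider might look like the sketch below. The flag names and provider values shown are assumptions; confirm them against the Git provider and repo flags reference linked above (or `cf runtime install --help`) before use:

```shell
# Hypothetical example: install the account's first runtime with GitLab as the Git provider.
# --provider and --repo values are placeholders; verify flag names and accepted
# provider values in the linked flags reference.
cf runtime install my-runtime \
  --provider gitlab \
  --repo https://gitlab.com/my-org/codefresh-runtime-config \
  --git-token $GIT_RUNTIME_TOKEN
```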
                                                            -{:/} - -#### Turn off notifications for runtimes -Codefresh alerts you to runtimes that are insecure or have invalid or expired Git personal access tokens. You can turn off these notifications selectively for runtimes for which these alerts are less critical, such as non-production runtimes. - -The option is user-specific, and applies only to runtimes in the user's account. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-dismiss-runtime-notifications.png" - url="/images/whats-new/rel-notes-oct22-dismiss-runtime-notifications.png" - alt="Turn off notifications for selected runtime" - caption="Turn off notifications for selected runtime" - max-width="80%" -%} - -Runtimes with disabled notifications are prefixed with an icon as in the picture below. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-dimiss-notifications-indication.png" - url="/images/whats-new/rel-notes-oct22-dimiss-notifications-indication.png" - alt="Runtime with disabled notifications" - caption="Runtime with disabled notifications" - max-width="80%" -%} - -{::nomarkdown} -
                                                            -{:/} - -#### Rollout Player for deployments -Managing ongoing rollouts during a deployment is now simple with the Rollout Player. Clicking the rollout name in Timeline > Updated Services displays both the visualization of the steps in the rollout and the Rollout Player. With the Rollout Player you can control individual steps in an ongoing rollout and even promote the rollout to a release. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct-22-rollout-player.png" - url="/images/whats-new/rel-notes-oct-22-rollout-player.png" - alt="Rollout Player" - caption="Rollout Player" - max-width="40%" -%} - - -The Rollout Player allows you to: -* Resume an indefinitely paused step -* Forward a step by skipping its execution -* Promote the rollout to deployment by skipping the remaining pause and analysis steps - -{::nomarkdown} -<br>
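The Rollout Player surfaces operations that are otherwise run with the Argo Rollouts kubectl plugin. A rough CLI equivalent, assuming the plugin is installed and a rollout named `demo-rollout` in namespace `demo` (both hypothetical names):

```shell
# Watch the rollout's steps, pauses, and analysis runs in the terminal
kubectl argo rollouts get rollout demo-rollout -n demo --watch

# Resume an indefinitely paused step (promotes to the next step)
kubectl argo rollouts promote demo-rollout -n demo

# Skip all remaining steps and promote the rollout fully
kubectl argo rollouts promote demo-rollout -n demo --full
```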
                                                            -{:/} - -#### Context menu for application resources -We have enhanced application resources in the Current State tab with a context menu for each resource. The options available differ according to the type of resource. - - - -**On-demand sync for individual application resources** -Sync is a context menu option available for all resources that track sync status. You can sync individual resources as needed or when out-of-sync without synchronizing or refreshing the application. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct-22-sync-context-menu.png" - url="/images/whats-new/rel-notes-oct-22-sync-context-menu.png" - alt="Sync option in resource context menu" - caption="Sync option in resource context menu" - max-width="50%" -%} - -**Rollout resource actions** -The context menu for `rollout` resource types has actions to control the rollout. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-rollout-context-menu.png" - url="/images/whats-new/rel-notes-oct22-rollout-context-menu.png" - alt="Context menu options for Rollout resource" - caption="Context menu options for Rollout resource" - max-width="50%" -%} - -{::nomarkdown} -<br>
                                                            -{:/} - -#### Other enhancements - -**Git Sources as Application Type filter** -The list of filters for Application Type in the Applications dashboard includes the Git Source filter. Filtering by Git Source shows `Git Source Apps` which are applications created by Codefresh that store definitions of Argo Project resources. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-app-type-git-source.png" - url="/images/whats-new/rel-notes-oct22-app-type-git-source.png" - alt="Git Source as Application Type filter" - caption="Git Source as Application Type filter" - max-width="40%" -%} - - -**Manifests for Analysis Runs** -Analysis Run now shows the manifest in addition to the run results. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-oct22-analysis-run-manifest.png" - url="/images/whats-new/rel-notes-oct22-analysis-run-manifest.png" - alt="Manifest for Analysis Run" - caption="Manifest for Analysis Run" - max-width="40%" -%} - -{::nomarkdown} -
                                                            -{:/} - -### Bug fixes - -{::nomarkdown} -
                                                            -{:/} - -**Runtimes** - -* 500: Internal Server Error when adding cluster command to hosted runtime. -* Commit SHA link in Activity Log goes to the Home page instead of to the Commit URL for the Git provider. -* Ingress controller errors for cluster even when `skip-ingress` flag is defined. -* Retry mechanism requests cause delay in Git integration checks. -* For hosted runtimes, Git Source is not displayed though the Connect to Git provider step is marked as complete. -* No option to log out on selecting invalid authentication mode. -* Removing a managed cluster does not display any indication in Codefresh UI. -* Up-to-date runtimes display upgrade indication. - - -{::nomarkdown} -
                                                            -{:/} - -**Applications** -* Applications deleted in Git displayed as errors, or as Missing in Codefresh. -* Tagging/untagging favorite application breaks relationship to parent application. -* Application definitions validation for cluster URL that does not exist shows wrong entity type. -* Incorrect number of replicas for previous image in Applications dashboard. -* Mismatch between information reported for cluster and namespace in Applications dashboard and Images. -* Source link in Timeline tab redirects to incorrect branch. -* Missing Health indication for Argo Rollouts in Codefresh UI. - -{::nomarkdown} -
                                                            -{:/} - -**Delivery Pipelines and workflows** -* 100% CPU consumption for workflows with more than 20 nodes. -* Discard Changes button enabled when there are no changes. - - -## September 2022 - - - -### Features & enhancements -{::nomarkdown} -
                                                            -{:/} - -#### Enriched application header -Every application includes a header that highlights key information and links to key aspects of the application. For example, you can see both the current sync state and the result of the previous sync operation, with links to pull-out panels including additional details. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-app-header.png" - url="/images/whats-new/rel-notes-sep22-app-header.png" - alt="Application header for selected appplication" - caption="Application header for selected appplication" - max-width="80%" -%} - -#### Refresh and hard refresh to manage applications -Just as you can manually synchronize applications directly in Codefresh, you can now perform Refresh and Hard Refresh for applications. -In the Applications dashboard, both options are available in the context menu of each application. On selecting an application, you can see them on the top-right next to the Synchronize button. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-refresh-hardrefresh.png" - url="/images/whats-new/rel-notes-sep22-refresh-hardrefresh.png" - alt="Refresh/Hard refresh options for selected application" - caption="Refresh/Hard refresh options for selected application" - max-width="80%" -%} - - - -#### Click resume indefinitely paused rollouts -Argo Rollouts allows you to pause a rollout indefinitely and resume it manually instead of automatically after a fixed duration. Manually resuming a rollout is generally done through the CLI. -Codefresh provides you the option of resuming an indefinitely paused rollout directly from the Applications dashboard in Codefresh, with a single click. - -In the Timelines tab of the selected application, an ongoing rollout that is indefinitely paused displays the pause button. Resuming the rollout is as simple as clicking the pause button. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-resume-pause.png" - url="/images/whats-new/rel-notes-sep22-resume-pause.png" - alt="Resume indefinitley paused rollout" - caption="Resume indefinitley paused rollout" - max-width="60%" -%} - -#### Custom path for application resources -When creating applications, in addition to changing the name of the manifest, you can now also define the path for the manifest within the Git Source. Use the front slash (/) to add subfolders to the path. The resource is created in the Git Source you select, according to the path you defined. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-app-yaml-location.png" - url="/images/whats-new/rel-notes-sep22-app-yaml-location.png" - alt="Define location for application YAML" - caption="Define location for application YAML" - max-width="60%" -%} - - - -#### Events tab for applications -In the previous month's release, we added the Events panel displaying successful and events for the application. -For more visibility and easier access, the same Events tab is now displayed with the Current State, Timeline, Services, and Configuration tabs for the selected application. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-sep22-events-tab.png" - url="/images/whats-new/rel-notes-sep22-events-tab.png" - alt="Events tab for application" - caption="Events tab for application" - max-width="60%" -%} - - -### Bug fixes - -{::nomarkdown} -
                                                            -{:/} - -**Runtimes** -* Incorrect status for Hosted runtime when app-proxy is unreachable. -* Git provider not registered for hosted runtimes with Git Sources defined in the shared configuration repo. -* Authentication failure between platform and app proxy. -* Adding cluster to a runtime shows an error even when the cluster is added to the runtime. -* Duplicate dates in Activity Log notifications. -* Argo CD fails to connect to K8s 1.24 clusters. -* After uninstalling a runtime, argo-rollouts and rollout-reporter files for managed clusters remain in the shared configuration repo. -* Deleted managed cluster shows as Unknown. - -{::nomarkdown} -<br>
                                                            -{:/} - -**Applications** -* Health status does not change to progressing when previously degraded. -* Wrong git source reference -* Git Source applications in the Applications dashboard not reflected in the Runtimes > Git Source tab. -* Switching from YAML to form view after changing fields does not update validations. -* App details drawer crashes when application does not have resources. -* Missing namespace for resources. -* Full Screen does not work in Safari. -* Recreating an application with the same name as that of a deleted application displays incorrect data for rollouts in the Timeline tab. -* In the Timeline tab, data for a new release with long sync duration is assigned to the previous release. - - -## August 2022 - -### Features & enhancements - -#### GitHub Container Registry -In this release, we added support for GitHub Container Registry (GHCR), a popular container registry tool. The settings for GitHub Container registry integration are identical to that of the other container registry integrations: the integration name, the runtimes to share the integration with, and the domain, username, and token. -You also have the Test Connection option to test credentials before committing the changes. -Once defined, you can reference the integration by name in the CI platforms. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-github-cr.png" - url="/images/whats-new/rel-notes-aug22-github-cr.png" - alt="GitHub Container registry integration" - caption="GitHub Container registry integration" - max-width="70%" -%} - -See [GitHub Container registry]({{site.baseurl}}/docs/integrations/ci-integrations/container-registries/github-cr/). - -#### Labels and annotations for managed clusters -The Codefresh CLI supports labels and annotations for managed clusters. -When you add a managed cluster in Codefresh, you can optionally add labels and annotations with the `--labels` and the `--annotations` flags. Codefresh supports the standard key-value formats for both, with multiple items separated by `,`. K8s rules for labels and annotations are valid here as well. - -See [Adding a managed cluster with Codefresh CLI]({{site.baseurl}}/docs/runtime/managed-cluster/#add-a-managed-cluster-with-codefresh-cli), and [Adding a managed cluster with Kustomize]({{site.baseurl}}/docs/runtime/managed-cluster/#add-a-managed-cluster-with-kustomize). - -#### Event information for application resources -View events for application resources directly in Codefresh. -While the Applications dashboard flags errors in all applications at the global level, the Events tab isolates successful and failed events per resource within an application, useful for resources such as pods. - -Instead of having to navigate to Argo CD to view events for an application resource, clicking the resource in the Current State view in Codefresh displays the Events tab for that resource. Events are displayed in descending order, with the most recent event displayed first. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-events-tab.png" - url="/images/whats-new/rel-notes-aug22-events-tab.png" - alt="Events tab for application in Current State" - caption="Events tab for application in Current State" - max-width="60%" -%} - -#### Quick View for applications -Similar to the detailed views for application resources, Codefresh offers a detailed view also for the application itself. 
-The Quick View for an application, collates definition, deployment, and event information, in the same location. The information is grouped into tabs for intuitive viewing: Summary, Metadata, Parameters, Sync Options, Manifest, and Events (as in the picture below). - -Easily access the Quick View either by selecting Quick View from the application’s context menu in the Applications dashboard, or by clicking the application resource in the Current State view. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-quickview-events.png" - url="/images/whats-new/rel-notes-aug22-quickview-events.png" - alt="Application Quick View: Events tab" - caption="Application Quick View: Events tab" - max-width="40%" -%} - -See [Application Quick View]({{site.baseurl}}/docs/deployment/applications-dashboard/#application-quick-view). - - - -#### Usability enhancements for applications -**Context menu for applications** -Every application in the Applications dashboard includes a new context menu with access to frequently-used and useful options such as Quick View, synchronize, and edit applications. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-new-app-nav.png" - url="/images/whats-new/rel-notes-aug22-new-app-nav.png" - alt="Tab order on application drilldown" - caption="Tab order on application drilldown" - max-width="70%" -%} - - -**Validations before commit with intuitive error message** -Codefresh validates Source, Destination, and Advanced Settings such as the Argo CD Project, when you create or update applications, _before_ committing the changes. -For easy identification, the section with the error is also highlighted in the Form, not only in the YAML manifest. For example, if the Revision or Path is missing in the General settings, the section is highlighted in red and the message displayed includes details on the possible reasons for the error. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-aug22-app-validation-errors.png" - url="/images/whats-new/rel-notes-aug22-app-validation-errors.png" - alt="Validation errors in Form mode for application" - caption="Validation errors in Form mode for application" -max-width="60%" -%} - -#### Miscellaneous changes - -{: .table .table-bordered .table-hover} -| Item | Description | -| ---------- | -------- | -| `CF_HOST` | Deprecated from v 0.0.460 and higher in CI integrations. Recommend using `CF_RUNTIME_NAME` instead. See [CI integrations argument reference]({{site.baseurl}}/docs/integrations/ci-integrations/#ci-integration-argument-reference). | -| `GHCR_GITHUB_TOKEN_AUTHENTICATION` | New value for `CF_CONTAINER_REGISTRY_INTEGRATION` argument. Can be selected for GitHub Container (GHCR) registries even when you don’t have a GHCR integration in Codefresh. See [GitHub Action-Codefresh integration arguments]({{site.baseurl}}/docs/integrations/ci-integrations/github-actions/#github-action-codefresh-integration-arguments).| - - - -### Bug fixes - -**Runtimes** -* Uninstalling runtime does not remove the integrations shared with the runtimes. -* Uninstalling a hosted or hybrid runtime does not remove it from the shared configuration repository. -* Unable to install Argo Rollouts on clusters with long cluster names. -* Empty Argo CD logs with "http internal error" in Codefresh. -* 500 status code on using default GKE/EKS context/cluster names. - -**Applications** -* Trying to commit an application that already exists results in a commit failure. 
- -**Images** -* Filters are not retained on navigating away from the Images dashboard. - -**Pipelines, workflows and Workflow Templates** - -* Workflow Template filter does not work for Git Source. -* Missing validation for `WORKFLOW_NAME` variable. -* Incorrect sync history date for Workflow Templates. -* Error on detaching predefined filters in pipelines. - -**Integrations** -* Docker Hub integration list appears empty until refreshed even when there are integrations. -* Test Connection option disabled when integration name is not defined. - - - - -## July 2022 - -### Features & enhancements - -#### Hosted GitOps -Codefresh has launched Hosted GitOps, our newest offering, a hosted and managed version of Argo CD. - -From application analytics, to application creation, rollout, and deployment, you get the best of both worlds: Argo CD with Codefresh's advanced functionalities and features for CD operations. -What it also means is easy set up and zero maintenance overhead. - -Read on for a summary of what you get with Hosted GitOps. - -**Hosted runtime** -Hosted GitOps supports hosted runtimes. The runtime is hosted on a Codefresh cluster and managed by Codefresh. Codefresh guides you through the three-step process of setting up your hosted environment. Read more in [Hosted runtime](#hosted-runtime). - -**Dashboards for visibility and traceability** -Here's a recap of Codefresh dashboards, including a brand new dashboard dedicated to DORA metrics: -* Home dashboard: For global analytics and system-wide deployment highlights, start with the Home dashboard. -* DORA metrics: A _new_ dashboard for DORA metrics and DevOps quantification. Read more in [DORA metrics](#dora-metrics). -* Applications dashboard: Easily track deployments and visualize rollouts across clusters and runtimes in the Applications dashboard. - -**Application lifecycle management** -Manage the entire application lifecycle directly in Codefresh, from creating, editing, and deleting applications. -Define all application settings in a single location through the intuitive Form mode or directly in YAML, and commit all changes to Git. - -Synchronize applications manually when needed. Read more in [On-demand app synchronization](#on-demand-app-synchronization). - -**Integrations for image enrichment** -With Hosted GitOps, you can integrate your CI tools with Codefresh for image enrichment. Read more in [Integrations for image enrichment](#integrations-for-image-enrichment) - -{::nomarkdown} -
                                                            -{:/} - -#### Hosted runtime -Hosted GitOps supports a GitHub-based SaaS runtime, hosted on a Codefresh cluster, and managed by Codefresh. -Setting up your hosted environment takes just a few clicks. All you need is a Codefresh account, a Git account, and a Kubernetes cluster to which to deploy your applications. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-hosted-initial-view.png" - url="/images/whats-new/rel-notes-jul22-hosted-initial-view.png" - alt="Hosted runtime setup" - caption="Hosted runtime setup" - max-width="80%" -%} - -Codefresh guides you through the simple three-step process of provisioning your hosted runtime. From that point, Codefresh handles administration and maintenance of the hosted runtime, including version and security updates. - -See [Set up a hosted (Hosted GitOps) environment]({{site.baseurl}}/docs/runtime/hosted-runtime/). - -{::nomarkdown} -
                                                            -{:/} - -#### DORA metrics -DORA metrics have become integral to enterprises wanting to quantify DevOps performance, and Codefresh has out-of-the-box support for them. - -The DORA dashboard in Codefresh goes beyond quantification, with features such as the Totals bar displaying key metrics, filters that allow you to pinpoint just which applications or runtimes are contributing to problematic metrics, the option to show metrics only for starred applications, and the ability to set a different view granularity for each DORA metric. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-dora-metrics.png" - url="/images/whats-new/rel-notes-jul22-dora-metrics.png" - alt="DORA metrics" - caption="DORA metrics" - max-width="60%" -%} - -See [DORA metrics]({{site.baseurl}}/docs/reporting/dora-metrics/). - -{::nomarkdown} -<br>
                                                            -{:/} - -#### Integrations for image enrichment -If you have our Hosted GitOps for CD and a different tool for CI, you can continue to enrich images, retaining your CI tools. Allow Codefresh to retrieve and report the image information in your deployments by connecting your CI tools to Codefresh. Connect CI tools, issue tracking tools, container registries, and more. - - -This release introduces our integration offering, starting with: -* GitHub Actions, Jenkins, and Codefresh Classic for CI -* Jira for issue tracking -* Docker Hub, Quay, JFrog Artifactory for container registries - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-github-action-settings.png" - url="/images/whats-new/rel-notes-jul22-github-action-settings.png" - alt="Image enrichment with GitHub Actions integration" - caption="Image enrichment with GitHub Actions integration" - max-width="60%" -%} - - We are continually expanding the range of integrations, so stay tuned for release announcements on new integrations. - -Codefresh encrypts the credentials for every integration you create, and stores them securely as Kubernetes Sealed Secrets, ensuring that the integration flow is completely GitOps-compatible. Pipelines reference the integration by the integration name instead of integration credentials. Codefresh retrieves enrichment information using the encrypted Kubernetes secrets. - -See [Image enrichment with integrations]({{site.baseurl}}/docs/integrations/image-enrichment-overview/). - -{::nomarkdown} -
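Because integration credentials are stored as Sealed Secrets on the runtime cluster, you can verify what was created with plain kubectl. A minimal sketch, assuming the runtime namespace is named `codefresh` and `<integration-name>` is a placeholder:

```shell
# List the SealedSecret resources created for integrations in the runtime namespace
kubectl get sealedsecrets -n codefresh

# Inspect one entry; only the encrypted payload is stored, never plain credentials
kubectl get sealedsecret <integration-name> -n codefresh -o yaml
```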
                                                            -{:/} - -#### Edit and delete applications - -Application management has become easier as you can now edit and delete applications directly in Codefresh. - -Update General and Advanced settings for application. Go directly to the Configuration tab for the application by selecting Edit in the Applications dashboard. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-edit-app-option.png" - url="/images/whats-new/rel-notes-jul22-edit-app-option.png" - alt="Edit application option" - caption="Edit application option" -max-width="80%" -%} - -The Delete application option is available when you select an application. -Codefresh warns you of the implication of deleting the selected application in the Delete form based on the Prune resource setting. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-delete-app.png" - url="/images/whats-new/rel-notes-jul22-delete-app.png" - alt="Delete application" - caption="Delete application" -max-width="50%" -%} - -See [Update application configuration]({{site.baseurl}}/docs/deployment/create-application/#update-application-configuration) and [Delete an application]({{site.baseurl}}/docs/deployment/create-application/#delete-an-application). - -{::nomarkdown} -
                                                            -{:/} - -#### On-demand app synchronization -Manually synchronize applications whenever needed directly from Codefresh. The synchronize option is a significant enhancement to the application lifecycle management options that we already support in Codefresh. - -The set of options for application synchronization is identical to that of Argo CD. For usability, they are grouped into two sets: Revision and Additional Options. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-sync-app.png" - url="/images/whats-new/rel-notes-jul22-sync-app.png" - alt="Synchronize application" - caption="Synchronize application" - max-width="60%" -%} - -{::nomarkdown} -<br>
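For comparison, the same manual sync expressed with the Argo CD CLI, which exposes the identical option set; `my-app` and the flag choices below are illustrative:

```shell
# Sync a single application to a specific revision, pruning removed resources
argocd app sync my-app --revision HEAD --prune

# Preview what the sync would change without applying it
argocd app sync my-app --dry-run
```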
                                                            -{:/} - -#### Activate access for Codefresh support -User Settings include an option to allow Codefresh support personnel account access for troubleshooting purposes. The option is disabled by default. When enabled, access is always coordinated and approved, and all actions are audited. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-account-access.png" - url="/images/whats-new/rel-notes-jul22-account-access.png" - alt="Enable account access" - caption="Enable account access" - max-width="80%" -%} - -See [Enable access for Codefresh support]({{site.baseurl}}/docs/administration/user-settings/#enable-access-for-codefresh-support). - -{::nomarkdown} -
                                                            -{:/} - -#### View logs by container -When viewing logs for applications and workflows, you can now select the container for which to display them. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jul22-log-container.png" - url="/images/whats-new/rel-notes-jul22-log-container.png" - alt="View logs by container" - caption="View logs by container" - max-width="50%" -%} - -### Bug fixes -**Runtimes** -* Unable to remove managed cluster on failure to add shared configuration repository. -* Maximum character limit not validated in cluster names. -* Failure when downloading logs for all runtime components. -* New cluster automatically assigned Unknown status. -* Sealed secret remains in cluster after uninstalling runtime. -* Unable to view rollouts on managed cluster. - - -**Applications** - -* Resources without namespaces (such as cluster role) do not open in Current State. -* Sync state icon frozen when syncing the application. -* Application created with the same name as deleted application displayed as new deployment. -* No error when creating an application with the same name as an existing application. -* Applications dashboard does not display an application with incorrect Source. -* Applications dashboard does not display the Jira issue for Docker image. -* Sync policy appears as Manual though set to automatic. -* Sync error message partially cut off. -* Application release does not always return binaryId, and repositoryName for transition images. -* Application name not displayed in sync errors. - -**Images** -* Registry filter used with other filters returns wrong results. -* Find query for image applications. - - -**Other** - -* Unable to view, access, and add SSO integrations. -* Failure on sealing key management check. -* Home dashboard: Most active pipelines and Delivery Pipelines displayed not aligned with the Time filter. -* Incorrect sorting for workflow and pipeline lists. - - - -## June 2022 - -### Features & enhancements - -#### Shared configuration for runtimes -Define configuration settings for a runtime once, and reuse the configuration settings for multiple runtimes in the same account. Reduce time needed to define and maintain configuration settings for every runtime separately. - -After defining the repository in which to store configuration settings, you can reference the repository, selectively from specific runtimes, or from all runtimes, and share the configuration. - -Older versions of hybrid runtimes without the shared repository must be upgraded to the latest version to leverage the shared configuration, specifically for integrations with CI platforms and tools. - -For details, see [Shared runtime configuration]({{site.baseurl}}/docs/runtime/shared-configuration/). - -#### Logs for runtime components -View and download logs for runtimes and runtime components. The logs record events from the time of application launch for all resources in the application. - -Download logs for offline viewing and analysis, or view logs per component online, and download as needed: - -* Download all logs: Available for every runtime for download as a single `.tar.gz` file, including the different log files for each runtime component. 
-{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-runtime-logs-all.png" - url="/images/whats-new/rel-notes-june22-runtime-logs-all.png" - alt="Download all logs for a runtime" - caption="Download all logs for a runtime" - max-width="60%" - %} - -* View/download logs per component: Available for every runtime component. View online logs, displaying up to 1000 lines of the most recent events. Locate information with free-text search, and navigate between search results using the next/previous buttons. Enhance readability by turning on line-wrap when needed. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-runtime-log-screen.png" - url="/images/whats-new/rel-notes-june22-runtime-log-screen.png" - alt="View logs online for runtime component" - caption="View logs online for runtime component" - max-width="60%" - %} - -For details, see [View/download runtime logs]({{site.baseurl}}/docs/runtime/download-runtime-logs/). - -#### OAuth2 authentication -OAuth (Open Authorization) 2.0 has become an industry standard for online authorization. Codefresh supports connections to your Git provider using OAuth2. Codefresh integrates with Git to sync repositories to your clusters, implement Git-based actions when creating resources such as Delivery Pipelines, and to enrich Images with valuable information. - -Codefresh provides a default, predefined OAuth2 application for every runtime. As an account administrator in Codefresh, you can optionally create an OAuth2 Application in GitHub and set up authentication within Codefresh. Users in Codefresh can then authorize access to GitHub with OAuth2, instead of with a personal access token. - -For details, see [Set up OAuth2 authentication]({{site.baseurl}}/docs/administration/oauth-setup/). - - -#### Application resources in Tree view -The Tree view of the Current State complements the List view of the same in the Applications dashboard. Similar to the List view, the Tree view also displays all the resources deployed for an application, with additional unique features. - -What is unique about the Tree view? -First, the Tree view simplifies visualization of and tracking resources for any deployment, think complex deployments with hundreds of resources. Second, it is designed to impart key information for each resource at a glance. Every resource shows its health status (color-coded border), sync state (icon prefixed to name), and metadata on mouse-over. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-tree-view.png" - url="/images/whats-new/rel-notes-june22-tree-view.png" - alt="Application Current State: Tree view" - caption="Application Current State: Tree view" - max-width="60%" - %} - - -**Progressive discovery** - -By the very nature of its design, the Tree View allows progressive discovery. View all resources at once, or start with a parent resource, and expand it to view siblings and children to understand how they are connected. - -**Resource filters** - -The filters in the List view are available also in the Tree view. These global filters help narrow the scope of the resources displayed, by kind, health status, and sync state. The filters set in either the List or Tree vies are retained when navigating between them. - -**Resource search and find** - -The Search option lets you locate resources by searching for any part of the resource name. Similar to the filters, search results are also retained when navigating between Tree and List views. 
-For quick search, use the Find option to locate and navigate to required resources. - -**Resource inventory** - -At the bottom-left, the resource inventory summarizes your deployment in numbers per resource kind. Syncing and Out-of-Sync resources for each kind are bucketed separately, for visibility, and for quick access to filter resources by these states. - -**Resource manifest and logs** - -In addition to the metadata on mouse-over for a resource, clicking a resource shows its manifests and logs based on the resource type. View and compare the Desired and Live states for managed resources in Git repositories. -Another usability enhancement is the ability to share resource details by copying the URL and sending it to others in your organization for collaborative review. - -Logs are displayed if the resource has logs: - -* For online viewing, you have free-text search and line-wrap functionalities. -* For offline viewing and analysis, you can download the complete log into a text file. - -For details, see [Current State Tree view]({{site.baseurl}}/docs/deployment/applications-dashboard/view-modes-for-application-resources/#working-with-resources-in-tree-view). - -#### Application rollout visualization -In addition to installing Argo Rollouts in your cluster, visualize Argo Rollout history and progress directly in the Applications (deployment) dashboard. Visualize rollouts from multiple clusters and runtimes in a single centralized location through the Deployment tab. - -**Rollout progress** -Ongoing rollouts show the progress of the rollout in the real time. Completed rollouts show the switch to the new version according to the deployment strategy. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-rollout-in-progress.png" - url="/images/whats-new/rel-notes-june22-rollout-in-progress.png" - caption="Application Rollout: Progress visualization" - max-width="60%" - %} - -**Rollout steps** - -As the rollout occurs, visualize step-by-step progress. Expanding Background Analysis displays metric queries and the manifest of the analysis template. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-rollout-analysis.png" - url="/images/whats-new/rel-notes-june22-rollout-analysis.png" - caption="Application Rollout: Steps visualization" - max-width="30%" - %} - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-june22-rollout-query-metrics.png" - url="/images/whats-new/rel-notes-june22-rollout-query-metrics.png" - caption="Application Rollout: Query metrics" - max-width="30%" - %} - -For details, see [Rollout progress and step visualization]({{site.baseurl}}/docs/deployment/applications-dashboard/#monitor-rollouts-by-deployment). - -#### Nested workflows -Add nested workflow functionality to Codefresh pipelines. A nested workflow is a step within the parent workflow that either submits a new workflow, or creates a PR (Pull Request) that runs a different workflow based on the PR result. - -Nested workflows run independently of the parent workflow that submitted them. A nested submit workflow has traceability in both directions, from the parent to child, and from the child to the parent. A workflow triggered by a nested PR identifies the PR that triggered it. - -Here’s an example of a parent workflow that submits two nested workflows, and the link back to the parent workflow from one of the child workflows. 
- - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jun22-nested-parent-submit.png" - url="/images/whats-new/rel-notes-jun22-nested-parent-submit.png" - caption="Parent workflow with two nested submit workflows" - max-width="60%" - %} - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-jun22-nested-child-submit.png" - url="/images/whats-new/rel-notes-jun22-nested-child-submit.png" - caption="Child submit workflow with link to parent workflow" - max-width="60%" - %} - -The Codefresh Hub for Argo has two ready-to-use Workflow Templates that: - -* Submits a workflow -* Creates a PR to run the workflow that tracks the PR - -For details, see [Nested workflows]({{site.baseurl}}/docs/pipelines/nested-workflows/). - -### Bug fixes -**Runtimes** - -* Encrypted Git integration remains when uninstalling runtime through the CLI, and decryption through app-proxy fails. -* Rollback occurs during installation via CLI. -* Runtime ignores –Demo resources=false flag install confirmation. -* Installation via CLI stops when demo resources are not installed even when –demo -resources flag is set to false. -* No errors during installation via CLI when flags are incorrectly located. -* Runtime name with health or sync errors not highlighted in Codefresh UI. - -**Images** - -* Empty pages on changing filters in page two or higher. -* Broken link for an image not in logged-in user account. -* Images view not updated with current application with rollout resource. - -**Applications** - -* Lock out due to slow application load. -* Application dashboard remains frozen in Progressing state. -* Application dashboard > Timeline tab: - - * Default view not restored on removing date range defined in the Timeline tab. - * Order of deployments in the chart not identical to the list of rollouts. - * Committer for GitOps change missing in Commit information. - * Missing commit message for SHA link. - * Changes to an image tag not reflected. - * Rollout shows as in progress even after deployment status is healthy. - * New release in Argo CD not displayed in Codefresh UI when latest release was degraded without previous rollout data. - * Rollout YAML unavailable when application source is a Helm repo. -* Applications dashboard > Services tab: - - * Progressing rollout with manual traffic management returns empty Services list. -* Applications dashboard > Current State - * Resource tree/list not restored on removing filters. - - -**Pipelines** - -* Selecting an existing Workflow Template creates a new Workflow Template. -* Incorrect line numbers for pipeline template in Form mode. - - -## May 2022 - -### Features & enhancements - -#### Runtime disaster recovery -Runtimes are integral to all CI/CD actions and operations in Codefresh. In this release, we added the capability to restore runtimes in case of cluster failures, either partial or complete. -All you need is the existing Git repo where you installed the runtime containing the runtime resources. The restore process reinstalls the runtime, leveraging the resources in the existing repo. You can choose to restore the runtime to the failed cluster or to a different cluster. -For details, see [Restore runtimes]({{site.baseurl}}/docs/runtime/runtime-recovery/). - -#### AWS ALB ingress controller -AWS Application Load Balancer (ALB) is now part of our list of supported ingress controllers. 
-For details, see Ingress controller requirements in [Requirements]({{site.baseurl}}/docs/runtime/requirements/#ingress-controller), and [Post-installation configuration]({{site.baseurl}}/docs/runtime/installation/#post-installation-configuration). - - -#### Labels for runtime namespace -When installing runtimes, the `--namespace-label` flag lets you add labels to the runtime namespace. The labels identify and grant access to the installation network, required with service mesh ingress controllers such as Istio. -For both CLI-based and silent installations, add the flag followed by one or more labels in `key=value` format. Note that these labels must be identical to those defined in the 'namespace' resource spec. -For details, see [Runtime installation flags]({{site.baseurl}}/docs/runtime/installation/#runtime-installation-flags). - -#### Internal and external ingress hosts -Codefresh runtimes support defining two ingress hosts, an internal and an external ingress host, for private and public networks. Previously, runtimes supported a single ingress host for both the app-proxy and webhook ingress resources. Internal and external ingress separation allows you to expose the Codefresh app-proxy service only within your private network, while keeping the webhook ingress unchanged. -* New runtime installations: The `--internal-ingress-host` flag lets you define an ingress host for communication with the app-proxy. For details, see [Runtime installation flags]({{site.baseurl}}/docs/runtime/installation/#runtime-installation-flags). -* Existing runtimes: To add an internal ingress host, you need to commit changes to the installation repository by modifying `app-proxy ingress` and `.yaml`. -For details, see _Internal ingress host configuration (optional)_ in [Post-installation configuration]({{site.baseurl}}/docs/runtime/installation#post-installation-configuration). - -For further customizations, add annotations for internal and external ingress hosts through the `--internal-ingress-annotation` and `--external-ingress-annotation` flags. - -#### oktapreview domain support -You can set up Okta SSO to log into your Okta preview environment. - -#### Git Source enhancements -A common scenario when using Git repositories for CI/CD is to include or exclude specific files or directories in the target repository from the destination repo or cluster. When creating or editing Git Sources in Codefresh, you can now include or exclude folders and files in the target Git repo, using glob patterns. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-may22-git-source-exclude-include.png" - url="/images/whats-new/rel-notes-may22-git-source-exclude-include.png" - alt="Include/exclude options in Git Source" - caption="Include/exclude options in Git Source" - max-width="50%" - %} - -You can also delete Git Sources if needed. Selecting additional actions for a Git Source displays the Git Source details with the Delete option. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-may22-git-source-delete.png" - url="/images/whats-new/rel-notes-may22-git-source-delete.png" - alt="Delete Git Source" - caption="Delete Git Source" - max-width="90%" - %} - -For details, see [Add and manage Git Sources]({{site.baseurl}}/docs/runtime/git-sources/). - -### Bug fixes -**Runtimes** - -* With Istio ingress, app proxy communication with Argo CD fails with an `Unexpected token u in JSON` error.
-* Adding a managed cluster always commits manifests to the main branch and not to the defined default branch. -* Add managed cluster command fails when ingress host includes `/` suffix. -* Application groups not supported in Current State for older runtime versions. -* Retrieving a list of Git Sources for a runtime via CLI causes the CLI to crash. -* Uninstalling a runtime does not remove runtime-related secrets from the cluster. - -**Applications** - -* Applications deleted from the Argo UI not removed from the Applications dashboard in Codefresh. -* Back button in Applications > Timeline tab does not work. -* Hierarchy for AppSet application created in Argo CD not rendered correctly in Codefresh. -* Most Active Applications list in the Home dashboard is incorrectly sorted. -* Link to CI build on Service in Applications Dashboard is hard-coded to Workflows. -* Add Application wizard creates invalid manifest. -* Removing a resource from an application does not remove it from the application’s Current State list. -* Deleting an application deletes it from the cluster and the Git repo, but not from the database. -* Creating an application without a path results in an error. -* On page reload, deployment chart in Application > Timeline tab does not reflect set filters. -* Resources with changed file names are not reported in Argo CD. -* Unknown state for application sets with targets on external clusters. - -**Others** -* Clicking the Settings icon shows a console error. -* Workflow Templates reported without Git manifests and desired state. -* Getting the list of workflows for a pipeline via CLI returns 400 bad request. -* GitHub user without a public email address causes autopilot to crash in app-proxy. -* Within a staging app, regular deployment transition is empty and shows only the replica count. - - -## March-April 2022 - -### Features & enhancements - -#### Kubernetes version runtime support -We now support the latest Kubernetes server versions, 1.22 and 1.23. - -#### Ingress controllers -We are continually working on supporting additional Ingress controllers, and this release adds support for: -* Ambassador -* NGINX Enterprise -* Istio -* Traefik - -All ingress controllers must be configured to report their status. -For details, see [Ingress controller requirements]({{site.baseurl}}/docs/runtime/requirements/#ingress-controller). - - -#### Argo CD managed cluster support -Argo CD can manage clusters without Argo CD installed on them. Now you have the same functionality in Codefresh, to add, view, and manage remote clusters. -Admins can add an external cluster to a Codefresh runtime, and register it automatically as a managed cluster. From that point on, you have complete visibility into health and sync status, and options to manage the clusters, including installing Argo Rollouts. - -With managed clusters in Codefresh, you get: -* Streamlined management: All cluster- and cluster-component level operations are managed through the runtime, in a centralized location. You can install new components, uninstall existing components, and remove the cluster from the runtime's managed list. A single click installs Argo Rollouts on the managed cluster.
- -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-argo-rollouts.png" - url="/images/whats-new/rel-notes-argo-rollouts.png" - alt="Install Argo Rollouts for managed cluster in topology view" - caption="Install Argo Rollouts for managed cluster in topology view" - max-width="70%" - %} - -* Seamless upgrades: Upgrades to runtimes or to runtime components in the local cluster automatically upgrade those in managed clusters as well. -* Integration with dashboards: Applications dashboards reflect deployment information for applications in all managed clusters. When Argo Rollouts are installed, application rollouts are also reported to the dashboard. - -For details, see [Managed clusters]({{site.baseurl}}/docs/runtime/managed-cluster). - -#### Topology views for runtimes - -Get a visual representation of the runtimes in your deployments, managed clusters, and cluster components with the Topology view for runtimes. -Quickly identify key information such as health and sync status, and version. -Add new clusters to or remove existing clusters from runtime management. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-runtime-topology-view.png" - url="/images/whats-new/rel-notes-runtime-topology-view.png" - alt="Runtime topology view" - caption="Runtime topology view" - max-width="70%" - %} - -For details, see [Topology view for runtimes]({{site.baseurl}}/docs/runtime/monitor-manage-runtimes/#topology-view). - -#### Analytics dashboard -In addition to Delivery Pipelines, the Analytics dashboard shows Runtimes, Managed Clusters, Deployments, and Applications, to give you the complete CI/CD picture with key facts and insights. - -**Usability enhancements** - * Global filters are now located at the top of the dashboard. - * Resource-specific filters are available for that resource. - * A convenient View button takes you to the dedicated resource view for additional analysis. - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-analytics-dashboard.png" - url="/images/whats-new/rel-notes-analytics-dashboard.png" - alt="Analytics dashboard" - caption="Analytics dashboard" - max-width="70%" - %} - -#### Applications dashboard -The Applications dashboard displays the individual deployments across your enterprise. Here are the main enhancements: - -**Application inventory and status filters** - - The health status snapshot in the Applications dashboard also works as a quick filter. Selecting a status filters applications by that status. - Filter criteria that match child applications automatically expand the parent application to show the child applications. - - {% include - image.html - lightbox="true" - file="/images/whats-new/app-dashboard-status-filter.png" - url="/images/whats-new/app-dashboard-status-filter.png" - alt="Applications dashboard: Filter by status" - caption="Applications dashboard: Filter by status" - max-width="70%" - %} - -**Rollouts** - - Intuitive visualization with the option to open the Images view in a new browser window. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-apps-open-image.png" - url="/images/whats-new/rel-notes-apps-open-image.png" - alt="Applications dashboard: Link to Image view" - caption="Applications dashboard: Link to Image view" - max-width="70%" - %} - -**Git committers** - Hovering over an avatar shows all commits made by that committer.
- - -**Current state of cluster resources** - Hierarchical representation of the resources deployed by this application in the cluster. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-app-current-state.png" - url="/images/whats-new/rel-notes-app-current-state.png" - alt="Applications dashboard: Current State" - caption="Applications dashboard: Current State" - max-width="70%" - %} - -#### Workflow Templates -Codefresh provides full-fledged management for the Workflow Template resource, from optimizing existing Workflow Templates to creating new ones, and testing them before committing. - - {% include - image.html - lightbox="true" - file="/images/whats-new/wrkflow-template-main.png" - url="/images/whats-new/wrkflow-template-main.png" - alt="Workflow Templates" - caption="Workflow Templates" - max-width="70%" - %} - -**Create, test, and optimize Workflow Templates** - Create Workflow Templates in three steps. Start by selecting one from the Codefresh Hub for Argo, or start with a blank template form. Customize the Workflow Template, and either run the template to test it or commit to submit it. - - {% include - image.html - lightbox="true" - file="/images/whats-new/wrkflow-template-add.png" - url="/images/whats-new/wrkflow-template-add.png" - alt="Add Workflow Template panel" - caption="Add Workflow Template panel" - max-width="50%" - %} - - For both new and existing Workflow Templates, the **Run** option enables you to test a new template, or changes to an existing template, without needing to first commit the changes. If the Workflow Template has previous iterations, you can view the arguments and values used in those iterations. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-wrkflow-temp-manifest-run.png" - url="/images/whats-new/rel-notes-wrkflow-temp-manifest-run.png" - alt="Run option for Workflow Templates" - caption="Run option for Workflow Templates" - max-width="70%" - %} - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-wrkflow-temp-run-args-view.png" - url="/images/whats-new/rel-notes-wrkflow-temp-run-args-view.png" - alt="Run Workflow Template: Arguments list" - caption="Run Workflow Template: Arguments list" - max-width="40%" - %} - - Workflows and Delivery Pipelines associated with the selected Workflow Template are displayed in their respective tabs, giving you all the information in the same location. - - -**Rename Workflow Template** - After creating a Workflow Template, you can rename it by selecting the template and clicking **Rename**. - The new name must be unique within the cluster. - - {% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-rename-workflow-template.png" - url="/images/whats-new/rel-notes-rename-workflow-template.png" - alt="Rename Workflow Template" - caption="Rename Workflow Template" - max-width="70%" - %} - - -#### Application creation wizard - -Create fully GitOps-compliant applications from the Codefresh UI. The application manifest is generated, committed to Git, and synced to your cluster. -When creating the application, you can use the UI forms or edit the manifest directly.
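For orientation, here is a minimal sketch of the kind of Argo CD `Application` manifest the wizard generates and commits; the name, repo URL, path, and destination below are placeholder values, not output produced by the wizard.

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: my-app                      # placeholder application name
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://github.com/example/my-app-manifests  # placeholder Git repo
    path: k8s                       # folder containing the Kubernetes manifests
    targetRevision: HEAD
  destination:
    server: https://kubernetes.default.svc
    namespace: my-app
  syncPolicy:
    automated:                      # keep the live state in sync with Git
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
```

Whether you fill in the UI form or edit the YAML directly, what is committed is a standard Argo CD resource, which is what keeps the flow GitOps-compliant.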
- - - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-app-create-settings.png" - url="/images/whats-new/rel-notes-app-create-settings.png" - alt="Application settings in application creation wizard" - caption="Application settings in application creation wizard" - max-width="70%" -%} - - -#### Delivery Pipeline flows -The Delivery Pipeline flow features several usability and functionality enhancements. - -**Seamless integration of Argo Event information with Argo Workflows** - - Once a workflow is submitted for a Delivery Pipeline, the Workflows tab visualizes the connections between the steps in the workflow. - With Argo Event information for the workflow also incorporated into the visualization, you have a unified view of Argo Events and Argo Workflows in a single location: the events that triggered the workflow, combined with the workflow itself. - - The Event Source manifest, the event payload, and the Sensor manifest are displayed as pull-out panels, allowing you to easily copy paths for attributes from event payloads, view logs, and download artifacts. - - This example shows the event payload from Argo Events for the workflow. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-event-payload.png" - url="/images/whats-new/rel-notes-event-payload.png" - alt="Panel with Event Payload in Workflows tab" - caption="Panel with Event Payload in Workflows tab" - max-width="70%" -%} - - This example shows the sensor manifest from Argo Events for the workflow. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-event-srce-manifest.png" - url="/images/whats-new/rel-notes-event-srce-manifest.png" - alt="Panel with Sensor manifest in Workflows tab" - caption="Panel with Sensor manifest in Workflows tab" - max-width="70%" -%} - -**Rename trigger resource** - - Similar to Workflow Templates, you can now change the trigger name of a Delivery Pipeline. The sensor name cannot be changed. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-rename-pipeline-trigger.png" - url="/images/whats-new/rel-notes-rename-pipeline-trigger.png" - alt="Rename trigger option for Delivery Pipeline" - caption="Rename trigger option for Delivery Pipelines" - max-width="70%" -%} - -**Git repo selection for commits** - - A dropdown list allows you to select one or more Git repos in the Trigger Conditions tab. Start typing, and use autocomplete to view and select from the available Git repos. - -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-git-repo-select.png" - url="/images/whats-new/rel-notes-git-repo-select.png" - alt="Git repo selection for Delivery Pipelines" - caption="Git repo selection for Delivery Pipelines" - max-width="70%" -%} - - -**Errors/warnings in manifests synced with line numbers in the manifest** - - Clicking the line number next to an error or a warning changes focus to the line in the manifest file with the error or warning. - - -#### Workflows dashboard enhancements - -**Link from workflows to their pipelines** - - Workflow names in the dashboard are clickable links. Clicking a workflow name takes you directly to the pipeline associated with that workflow. - -**New status for active workflows without events** - -Identify workflows that are active but do not have any execution data with the new status filter in the Workflows dashboard. Filtering by Status ‘Unknown’ shows workflows without events for the last hour.
- -{% include - image.html - lightbox="true" - file="/images/whats-new/rel-notes-workflow-unknown-status.png" - url="/images/whats-new/rel-notes-workflow-unknown-status.png" - alt="Unknown status filter for workflows" - caption="Unknown status filter for workflows" - max-width="50%" -%} - -#### Image reporting with Docker config.json -You can now also authenticate to a Docker registry using Docker `config.json` to report image information. Note that `config.json` is not currently supported for GCR, ECR, and ACR. -For more information on the required fields, see [Report image info](https://github.com/codefresh-io/argo-hub/blob/main/workflows/codefresh-csdp/versions/0.0.6/docs/report-image-info.md){:target="\_blank"}. - - -#### OpenShift 4.8 support -CSDP supports Red Hat OpenShift 4.8. For detailed information, read their [blog](https://cloud.redhat.com/blog/red-hat-openshift-4.8-is-now-generally-available#:~:text=OpenShift%204.8%20improves%20the%20bare,is%20now%20shipping%20with%20OpenShift){:target="\_blank"}. - -### Bug fixes - -**Applications dashboard** - -* Inaccurate results when filtering by Application type. -* Cluster shows the address of the Argo CD cluster instead of the target cluster. -* Broken Commit link in Application Preview. -* Filter by favorites does not show ApplicationSets. -* Releases not ordered correctly. -* Missing tags for Application/ApplicationSet. -* Loop created on changing date in the Applications dashboard. -* Rollouts in Deployment chart not aligned with the actual order of rollouts. -* Missing current release label. -* Missing commit message. -* JIRA annotations not displayed for Images in Docker.io. -* Avatars show up intermittently. -* Incorrect Committers in Applications dashboard. -* Performance issues. - -**Images** - -* Duplicate applications in Images repositories with different tags. -* Unmarked deployed images. - -**Pipelines** - -* Empty event-sources. -* Missing created/updated/deleted status for resources. -* Event mapping issues. -* Creating a new pipeline with an existing Template shows empty Template tab. - -**Upgrade** - -* Agent upgrade overrides configuration in previous release. - -**Uninstall** - -* Artifacts in database after uninstalling with `--force` flag. -* Uninstallation issues with newer K8s versions. - - diff --git a/_docs/pipelines/concurrency-limit.md b/_docs/workflows/concurrency-limit.md similarity index 98% rename from _docs/pipelines/concurrency-limit.md rename to _docs/workflows/concurrency-limit.md index 780a53a4..405361dd 100644 --- a/_docs/pipelines/concurrency-limit.md +++ b/_docs/workflows/concurrency-limit.md @@ -1,13 +1,13 @@ --- title: "Selectors for concurrency synchronization" description: "" -group: pipelines +group: workflows toc: true --- Argo Workflows has a synchronization mechanism to limit parallel execution of specific workflows or templates within workflows, as required. -The mechanism enforces this with either semaphore or mutex synchronization configurations. For detailed information, see [Synchronization](https://argoproj.github.io/argo-workflows/synchronization/). +The mechanism enforces this with either semaphore or mutex synchronization configurations. For detailed information, see [Synchronization](https://argoproj.github.io/argo-workflows/synchronization/){:target="\_blank"}. Codefresh supports an additional level of concurrency synchronization, with _selectors_, for both workflows and templates.
@@ -163,4 +163,6 @@ synchronization: - synchronization-wf-8lf9b semaphore: argo/ConfigMap/semaphore-config/workflow?repository=denis-codefresh/argo-workflows&branch=feature ``` +## Related articles +[Creating workflows]({{site.baseurl}}/docs/workflows/create-pipeline) diff --git a/_docs/pipelines/configure-artifact-repository.md b/_docs/workflows/configure-artifact-repository.md similarity index 98% rename from _docs/pipelines/configure-artifact-repository.md rename to _docs/workflows/configure-artifact-repository.md index 894ec98c..3f4b6057 100644 --- a/_docs/pipelines/configure-artifact-repository.md +++ b/_docs/workflows/configure-artifact-repository.md @@ -1,7 +1,7 @@ --- title: "Configure artifact repository" description: "" -group: pipelines +group: workflows toc: true --- @@ -179,3 +179,6 @@ As the final step in configuring the artifact repository, for the `argo-server` 1. Wait for the configuration changes to take effect on the cluster. 1. Check the `argo-server` service account and verify that it is updated with the user-provided `annotation`. 1. Select the `argo-server-<#>` pod or pods and delete them. + +## Related articles +[Creating workflows]({{site.baseurl}}/docs/workflows/create-pipeline) \ No newline at end of file diff --git a/_docs/pipelines/create-pipeline.md b/_docs/workflows/create-pipeline.md similarity index 97% rename from _docs/pipelines/create-pipeline.md rename to _docs/workflows/create-pipeline.md index 0f11bf6a..60cb8076 100644 --- a/_docs/pipelines/create-pipeline.md +++ b/_docs/workflows/create-pipeline.md @@ -1,7 +1,7 @@ --- -title: "Pipeline creation" +title: "Creating workflows" description: "" -group: pipelines +group: workflows toc: true --- @@ -33,7 +33,7 @@ An intuitive selection mechanism enables you to easily select and configure each ### Delivery Pipeline creation flow Here's a high-level overview of the Delivery Pipeline creation flow. -For step-by-step instructions, see [How to: create a Delivery Pipeline]({{site.baseurl}}/docs/pipelines/create-pipeline/#how-to-create-a-delivery-pipeline). +For step-by-step instructions, see [How to: create a Delivery Pipeline]({{site.baseurl}}/docs/workflows/create-pipeline/#how-to-create-a-delivery-pipeline). 1. Define pipeline name and select Workflow Template to execute 1. Define default values for pipeline workflow template arguments @@ -56,7 +56,7 @@ In the Delivery Pipeline wizard, we have our starter Workflow Template to use as -> To share artifacts between steps in workflows, and to view archived logs for completed workflows, you must [configure an artifact repository in Codefresh]({{site.baseurl}}/docs/pipelines/configure-artifact-repository). +> To share artifacts between steps in workflows, and to view archived logs for completed workflows, you must [configure an artifact repository in Codefresh]({{site.baseurl}}/docs/workflows/configure-artifact-repository). @@ -278,3 +278,7 @@ Follow the step-by-step instructions to guide you through Delivery Pipeline wiza Codefresh commits the pipeline to the Git repository, and then syncs it to the cluster. Wait a few seconds for the sync to complete, and verify that the pipeline is displayed in the [Delivery Pipelines](https://g.codefresh.io/2.0/pipelines){:target="\_blank"} page. 
+## Related articles +[Selectors for concurrency synchronization]({{site.baseurl}}/docs/workflows/concurrency-limit) +[Nested workflows]({{site.baseurl}}/docs/workflows/nested-workflows) +[Configure artifact repository]({{site.baseurl}}/docs/workflows/configure-artifact-repository) diff --git a/_docs/pipelines/nested-workflows.md b/_docs/workflows/nested-workflows.md similarity index 99% rename from _docs/pipelines/nested-workflows.md rename to _docs/workflows/nested-workflows.md index 7264b1bf..7539c44d 100644 --- a/_docs/pipelines/nested-workflows.md +++ b/_docs/workflows/nested-workflows.md @@ -1,7 +1,7 @@ --- title: "Nested workflows" description: "" -group: pipelines +group: workflows toc: true --- diff --git a/_docs/pipelines/sharing-file-system.md b/_docs/workflows/sharing-file-system.md similarity index 99% rename from _docs/pipelines/sharing-file-system.md rename to _docs/workflows/sharing-file-system.md index 12722a43..c01e0125 100644 --- a/_docs/pipelines/sharing-file-system.md +++ b/_docs/workflows/sharing-file-system.md @@ -1,7 +1,7 @@ --- title: "Sharing file systems" description: "" -group: pipelines +group: workflows toc: true --- diff --git a/_docs/pipelines/workflows.md b/_docs/workflows/workflows.md similarity index 100% rename from _docs/pipelines/workflows.md rename to _docs/workflows/workflows.md diff --git a/images/administration/access-control/kubernetes-abac.png b/images/administration/access-control/kubernetes-abac.png new file mode 100644 index 00000000..6dd650d5 Binary files /dev/null and b/images/administration/access-control/kubernetes-abac.png differ diff --git a/images/administration/access-control/kubernetes-policies.png b/images/administration/access-control/kubernetes-policies.png new file mode 100644 index 00000000..97249cfe Binary files /dev/null and b/images/administration/access-control/kubernetes-policies.png differ diff --git a/images/administration/access-control/pipeline-git-restrictions.png b/images/administration/access-control/pipeline-git-restrictions.png new file mode 100644 index 00000000..caa94620 Binary files /dev/null and b/images/administration/access-control/pipeline-git-restrictions.png differ diff --git a/images/administration/access-control/pipeline-restrictions.png b/images/administration/access-control/pipeline-restrictions.png new file mode 100644 index 00000000..75fbafbd Binary files /dev/null and b/images/administration/access-control/pipeline-restrictions.png differ diff --git a/images/administration/access-control/pipeline-tags.png b/images/administration/access-control/pipeline-tags.png new file mode 100644 index 00000000..22b921e6 Binary files /dev/null and b/images/administration/access-control/pipeline-tags.png differ diff --git a/images/administration/access-control/security-timeout.png b/images/administration/access-control/security-timeout.png new file mode 100644 index 00000000..41d477fe Binary files /dev/null and b/images/administration/access-control/security-timeout.png differ diff --git a/images/administration/access-control/tagging-kubernetes-clusters.png b/images/administration/access-control/tagging-kubernetes-clusters.png new file mode 100644 index 00000000..0d118f98 Binary files /dev/null and b/images/administration/access-control/tagging-kubernetes-clusters.png differ diff --git a/images/administration/access-control/teams.png b/images/administration/access-control/teams.png new file mode 100644 index 00000000..58f7c2c8 Binary files /dev/null and b/images/administration/access-control/teams.png differ diff 
--git a/images/administration/access-control/user-access-control.png b/images/administration/access-control/user-access-control.png new file mode 100644 index 00000000..95410175 Binary files /dev/null and b/images/administration/access-control/user-access-control.png differ diff --git a/images/administration/audit/api-call-details.png b/images/administration/audit/api-call-details.png new file mode 100644 index 00000000..5bae454c Binary files /dev/null and b/images/administration/audit/api-call-details.png differ diff --git a/images/administration/audit/audit-filter.png b/images/administration/audit/audit-filter.png new file mode 100644 index 00000000..4a8f7762 Binary files /dev/null and b/images/administration/audit/audit-filter.png differ diff --git a/images/administration/audit/audit-logs.png b/images/administration/audit/audit-logs.png new file mode 100644 index 00000000..0bcda78c Binary files /dev/null and b/images/administration/audit/audit-logs.png differ diff --git a/images/administration/audit/audit-triggers.png b/images/administration/audit/audit-triggers.png new file mode 100644 index 00000000..20ca12d8 Binary files /dev/null and b/images/administration/audit/audit-triggers.png differ diff --git a/images/administration/audit/icon-UnorderedList-notification.png b/images/administration/audit/icon-UnorderedList-notification.png deleted file mode 100644 index 0ba6f178..00000000 Binary files a/images/administration/audit/icon-UnorderedList-notification.png and /dev/null differ diff --git a/images/administration/create-account/bitbucket-authorize.png b/images/administration/create-account/bitbucket-authorize.png new file mode 100644 index 00000000..987adbff Binary files /dev/null and b/images/administration/create-account/bitbucket-authorize.png differ diff --git a/images/administration/create-account/codefresh-accountname.png b/images/administration/create-account/codefresh-accountname.png new file mode 100644 index 00000000..44cfd103 Binary files /dev/null and b/images/administration/create-account/codefresh-accountname.png differ diff --git a/images/administration/create-account/codefresh-dashboard.png b/images/administration/create-account/codefresh-dashboard.png new file mode 100644 index 00000000..86c077b0 Binary files /dev/null and b/images/administration/create-account/codefresh-dashboard.png differ diff --git a/images/administration/create-account/codefresh-personalize.png b/images/administration/create-account/codefresh-personalize.png new file mode 100644 index 00000000..205fd55b Binary files /dev/null and b/images/administration/create-account/codefresh-personalize.png differ diff --git a/images/administration/create-account/codefresh-signup.png b/images/administration/create-account/codefresh-signup.png new file mode 100644 index 00000000..ab7a3566 Binary files /dev/null and b/images/administration/create-account/codefresh-signup.png differ diff --git a/images/administration/create-account/create-account-steps.png b/images/administration/create-account/create-account-steps.png new file mode 100644 index 00000000..9769257b Binary files /dev/null and b/images/administration/create-account/create-account-steps.png differ diff --git a/images/administration/create-account/git-firewall.png b/images/administration/create-account/git-firewall.png new file mode 100644 index 00000000..bf4842c4 Binary files /dev/null and b/images/administration/create-account/git-firewall.png differ diff --git a/images/administration/create-account/github-authorize.png 
b/images/administration/create-account/github-authorize.png new file mode 100644 index 00000000..49aa8809 Binary files /dev/null and b/images/administration/create-account/github-authorize.png differ diff --git a/images/administration/create-account/gitlab-authorize.png b/images/administration/create-account/gitlab-authorize.png new file mode 100644 index 00000000..d666b39f Binary files /dev/null and b/images/administration/create-account/gitlab-authorize.png differ diff --git a/images/administration/create-account/select-identity-provider.png b/images/administration/create-account/select-identity-provider.png new file mode 100644 index 00000000..d5321606 Binary files /dev/null and b/images/administration/create-account/select-identity-provider.png differ diff --git a/images/administration/create-account/stash.png b/images/administration/create-account/stash.png new file mode 100644 index 00000000..1415da35 Binary files /dev/null and b/images/administration/create-account/stash.png differ diff --git a/images/administration/manage-pats/allow-support-access.png b/images/administration/manage-pats/allow-support-access.png new file mode 100644 index 00000000..2f58419c Binary files /dev/null and b/images/administration/manage-pats/allow-support-access.png differ diff --git a/images/administration/manage-pats/bitbucket-pat-scopes.png b/images/administration/manage-pats/bitbucket-pat-scopes.png new file mode 100644 index 00000000..3a81e17a Binary files /dev/null and b/images/administration/manage-pats/bitbucket-pat-scopes.png differ diff --git a/images/administration/manage-pats/github-pat-scopes.png b/images/administration/manage-pats/github-pat-scopes.png new file mode 100644 index 00000000..bd3443fb Binary files /dev/null and b/images/administration/manage-pats/github-pat-scopes.png differ diff --git a/images/administration/manage-pats/gitlab-pat-scopes.png b/images/administration/manage-pats/gitlab-pat-scopes.png new file mode 100644 index 00000000..08911e5d Binary files /dev/null and b/images/administration/manage-pats/gitlab-pat-scopes.png differ diff --git a/images/administration/manage-pats/oauth-user-authentication.png b/images/administration/manage-pats/oauth-user-authentication.png new file mode 100644 index 00000000..d57bbb41 Binary files /dev/null and b/images/administration/manage-pats/oauth-user-authentication.png differ diff --git a/images/administration/manage-pats/user-settings-pat.png b/images/administration/manage-pats/user-settings-pat.png new file mode 100644 index 00000000..fd97a104 Binary files /dev/null and b/images/administration/manage-pats/user-settings-pat.png differ diff --git a/images/administration/sso/collaborators.png b/images/administration/sso/collaborators.png index e589219d..a0e022c1 100644 Binary files a/images/administration/sso/collaborators.png and b/images/administration/sso/collaborators.png differ diff --git a/images/administration/user-settings/allow-support-access.png b/images/administration/user-settings/allow-support-access.png new file mode 100644 index 00000000..2f58419c Binary files /dev/null and b/images/administration/user-settings/allow-support-access.png differ diff --git a/images/administration/user-settings/notifications.png b/images/administration/user-settings/notifications.png new file mode 100644 index 00000000..2e54acc6 Binary files /dev/null and b/images/administration/user-settings/notifications.png differ diff --git a/images/administration/users/invite-users.png b/images/administration/users/invite-users.png new file mode 100644 
index 00000000..ee244385 Binary files /dev/null and b/images/administration/users/invite-users.png differ diff --git a/images/deployments/helm/add-helm-repository.png b/images/deployments/helm/add-helm-repository.png new file mode 100644 index 00000000..f9227b25 Binary files /dev/null and b/images/deployments/helm/add-helm-repository.png differ diff --git a/images/deployments/helm/connect-helm-repo.png b/images/deployments/helm/connect-helm-repo.png new file mode 100644 index 00000000..57631986 Binary files /dev/null and b/images/deployments/helm/connect-helm-repo.png differ diff --git a/images/deployments/helm/diff.png b/images/deployments/helm/diff.png new file mode 100644 index 00000000..56888435 Binary files /dev/null and b/images/deployments/helm/diff.png differ diff --git a/images/deployments/helm/full-helm-pipeline.png b/images/deployments/helm/full-helm-pipeline.png new file mode 100644 index 00000000..429cbe4c Binary files /dev/null and b/images/deployments/helm/full-helm-pipeline.png differ diff --git a/images/deployments/helm/helm-badge.png b/images/deployments/helm/helm-badge.png new file mode 100644 index 00000000..ff284dcd Binary files /dev/null and b/images/deployments/helm/helm-badge.png differ diff --git a/images/deployments/helm/helm-commit-message.png b/images/deployments/helm/helm-commit-message.png new file mode 100644 index 00000000..b9433254 Binary files /dev/null and b/images/deployments/helm/helm-commit-message.png differ diff --git a/images/deployments/helm/helm-release-dashboard.png b/images/deployments/helm/helm-release-dashboard.png new file mode 100644 index 00000000..c7078f88 Binary files /dev/null and b/images/deployments/helm/helm-release-dashboard.png differ diff --git a/images/deployments/helm/history.png b/images/deployments/helm/history.png new file mode 100644 index 00000000..c00e3e5a Binary files /dev/null and b/images/deployments/helm/history.png differ diff --git a/images/deployments/helm/import-helm-configuration.png b/images/deployments/helm/import-helm-configuration.png new file mode 100644 index 00000000..17fbbf3c Binary files /dev/null and b/images/deployments/helm/import-helm-configuration.png differ diff --git a/images/deployments/helm/k8s-name.png b/images/deployments/helm/k8s-name.png new file mode 100644 index 00000000..24197bb2 Binary files /dev/null and b/images/deployments/helm/k8s-name.png differ diff --git a/images/deployments/helm/managed-helm-repo.png b/images/deployments/helm/managed-helm-repo.png new file mode 100644 index 00000000..0a9d0c5b Binary files /dev/null and b/images/deployments/helm/managed-helm-repo.png differ diff --git a/images/deployments/helm/manifests.png b/images/deployments/helm/manifests.png new file mode 100644 index 00000000..67e4ad30 Binary files /dev/null and b/images/deployments/helm/manifests.png differ diff --git a/images/deployments/helm/override-helm-actions.png b/images/deployments/helm/override-helm-actions.png new file mode 100644 index 00000000..db9a39df Binary files /dev/null and b/images/deployments/helm/override-helm-actions.png differ diff --git a/images/deployments/helm/promotion/board-management.png b/images/deployments/helm/promotion/board-management.png new file mode 100644 index 00000000..8b99e0a8 Binary files /dev/null and b/images/deployments/helm/promotion/board-management.png differ diff --git a/images/deployments/helm/promotion/board-selection.png b/images/deployments/helm/promotion/board-selection.png new file mode 100644 index 00000000..01922b53 Binary files /dev/null and 
b/images/deployments/helm/promotion/board-selection.png differ diff --git a/images/deployments/helm/promotion/board.png b/images/deployments/helm/promotion/board.png new file mode 100644 index 00000000..bbd35458 Binary files /dev/null and b/images/deployments/helm/promotion/board.png differ diff --git a/images/deployments/helm/promotion/edit-helm-environment.png b/images/deployments/helm/promotion/edit-helm-environment.png new file mode 100644 index 00000000..7e06aa85 Binary files /dev/null and b/images/deployments/helm/promotion/edit-helm-environment.png differ diff --git a/images/deployments/helm/promotion/edit-value.png b/images/deployments/helm/promotion/edit-value.png new file mode 100644 index 00000000..b28abfb2 Binary files /dev/null and b/images/deployments/helm/promotion/edit-value.png differ diff --git a/images/deployments/helm/promotion/expand.png b/images/deployments/helm/promotion/expand.png new file mode 100644 index 00000000..9e4d063f Binary files /dev/null and b/images/deployments/helm/promotion/expand.png differ diff --git a/images/deployments/helm/promotion/filter.png b/images/deployments/helm/promotion/filter.png new file mode 100644 index 00000000..a429fd40 Binary files /dev/null and b/images/deployments/helm/promotion/filter.png differ diff --git a/images/deployments/helm/promotion/helm-environments.png b/images/deployments/helm/promotion/helm-environments.png new file mode 100644 index 00000000..4b584979 Binary files /dev/null and b/images/deployments/helm/promotion/helm-environments.png differ diff --git a/images/deployments/helm/promotion/promote-settings.png b/images/deployments/helm/promotion/promote-settings.png new file mode 100644 index 00000000..4d965ac5 Binary files /dev/null and b/images/deployments/helm/promotion/promote-settings.png differ diff --git a/images/deployments/helm/promotion/promote.png b/images/deployments/helm/promotion/promote.png new file mode 100644 index 00000000..94bd01a8 Binary files /dev/null and b/images/deployments/helm/promotion/promote.png differ diff --git a/images/deployments/helm/promotion/shift-left.png b/images/deployments/helm/promotion/shift-left.png new file mode 100644 index 00000000..cce3c937 Binary files /dev/null and b/images/deployments/helm/promotion/shift-left.png differ diff --git a/images/deployments/helm/promotion/shift-right.png b/images/deployments/helm/promotion/shift-right.png new file mode 100644 index 00000000..54d1a2ff Binary files /dev/null and b/images/deployments/helm/promotion/shift-right.png differ diff --git a/images/deployments/helm/promotion/value-options.png b/images/deployments/helm/promotion/value-options.png new file mode 100644 index 00000000..5ce6820f Binary files /dev/null and b/images/deployments/helm/promotion/value-options.png differ diff --git a/images/deployments/helm/quick-helm-integration.png b/images/deployments/helm/quick-helm-integration.png new file mode 100644 index 00000000..d6e7ced4 Binary files /dev/null and b/images/deployments/helm/quick-helm-integration.png differ diff --git a/images/deployments/helm/rollback.png b/images/deployments/helm/rollback.png new file mode 100644 index 00000000..5ff38aae Binary files /dev/null and b/images/deployments/helm/rollback.png differ diff --git a/images/deployments/helm/services.png b/images/deployments/helm/services.png new file mode 100644 index 00000000..e9e00b23 Binary files /dev/null and b/images/deployments/helm/services.png differ diff --git a/images/deployments/kubernetes/define-k8s-deployment-resource.png 
b/images/deployments/kubernetes/define-k8s-deployment-resource.png new file mode 100644 index 00000000..381ea2e7 Binary files /dev/null and b/images/deployments/kubernetes/define-k8s-deployment-resource.png differ diff --git a/images/deployments/kubernetes/define-k8s-service-resource.png b/images/deployments/kubernetes/define-k8s-service-resource.png new file mode 100644 index 00000000..2488975b Binary files /dev/null and b/images/deployments/kubernetes/define-k8s-service-resource.png differ diff --git a/images/deployments/kubernetes/deploying-private-cf-registry.png b/images/deployments/kubernetes/deploying-private-cf-registry.png new file mode 100644 index 00000000..92b9c979 Binary files /dev/null and b/images/deployments/kubernetes/deploying-private-cf-registry.png differ diff --git a/images/deployments/kubernetes/describe-k8s-deployment.png b/images/deployments/kubernetes/describe-k8s-deployment.png new file mode 100644 index 00000000..68be6815 Binary files /dev/null and b/images/deployments/kubernetes/describe-k8s-deployment.png differ diff --git a/images/deployments/kubernetes/environment-variables-deployment.png b/images/deployments/kubernetes/environment-variables-deployment.png new file mode 100644 index 00000000..6b90e010 Binary files /dev/null and b/images/deployments/kubernetes/environment-variables-deployment.png differ diff --git a/images/deployments/kubernetes/grid-view.png b/images/deployments/kubernetes/grid-view.png new file mode 100644 index 00000000..d05e9e55 Binary files /dev/null and b/images/deployments/kubernetes/grid-view.png differ diff --git a/images/deployments/kubernetes/kube-context.png b/images/deployments/kubernetes/kube-context.png new file mode 100644 index 00000000..c94744a6 Binary files /dev/null and b/images/deployments/kubernetes/kube-context.png differ diff --git a/images/deployments/kubernetes/kubernetes-dashboard.png b/images/deployments/kubernetes/kubernetes-dashboard.png new file mode 100644 index 00000000..1a78c079 Binary files /dev/null and b/images/deployments/kubernetes/kubernetes-dashboard.png differ diff --git a/images/deployments/kubernetes/parallel-kubectl.png b/images/deployments/kubernetes/parallel-kubectl.png new file mode 100644 index 00000000..9945bfe6 Binary files /dev/null and b/images/deployments/kubernetes/parallel-kubectl.png differ diff --git a/images/deployments/kubernetes/quick-ui-deploy.png b/images/deployments/kubernetes/quick-ui-deploy.png new file mode 100644 index 00000000..b2bc242d Binary files /dev/null and b/images/deployments/kubernetes/quick-ui-deploy.png differ diff --git a/images/examples/amazon-ecs/ecs-pipeline-deployment.png b/images/examples/amazon-ecs/ecs-pipeline-deployment.png new file mode 100644 index 00000000..cc4d3347 Binary files /dev/null and b/images/examples/amazon-ecs/ecs-pipeline-deployment.png differ diff --git a/images/examples/amazon-ecs/ecs-variables.png b/images/examples/amazon-ecs/ecs-variables.png new file mode 100644 index 00000000..f9bc5fa1 Binary files /dev/null and b/images/examples/amazon-ecs/ecs-variables.png differ diff --git a/images/examples/checkout/add-new-microservice.png b/images/examples/checkout/add-new-microservice.png new file mode 100644 index 00000000..58b965bc Binary files /dev/null and b/images/examples/checkout/add-new-microservice.png differ diff --git a/images/examples/checkout/add-new-microservice.svg b/images/examples/checkout/add-new-microservice.svg new file mode 100644 index 00000000..fd393f6b --- /dev/null +++ b/images/examples/checkout/add-new-microservice.svg 
@@ -0,0 +1,1348 @@ [SVG source added (1,348 lines), not reproduced here: a pipeline diagram with steps, Trigger 1 through Trigger 4, Trigger N, and Microservice N] diff --git a/images/examples/checkout/simulate-trigger.png b/images/examples/checkout/simulate-trigger.png new file mode 100644 index 00000000..2c4da4ff Binary files /dev/null and b/images/examples/checkout/simulate-trigger.png differ diff --git a/images/examples/composition/launch-composition-example.png b/images/examples/composition/launch-composition-example.png new file mode 100644 index 00000000..ff5068a1 Binary files /dev/null and b/images/examples/composition/launch-composition-example.png differ diff --git a/images/examples/deployments/heroku-deployer-pipeline.png b/images/examples/deployments/heroku-deployer-pipeline.png new file mode 100644 index 00000000..3189634c Binary files /dev/null and b/images/examples/deployments/heroku-deployer-pipeline.png differ diff --git a/images/examples/deployments/heroku-deployer-variables.png b/images/examples/deployments/heroku-deployer-variables.png new file mode 100644 index 00000000..7fff37fb Binary files /dev/null and b/images/examples/deployments/heroku-deployer-variables.png differ diff --git a/images/examples/deployments/heroku-deployer-variables2.png b/images/examples/deployments/heroku-deployer-variables2.png new file mode 100644 index 00000000..a8a944c5 Binary files /dev/null and b/images/examples/deployments/heroku-deployer-variables2.png differ diff --git a/images/examples/deployments/heroku-vanilla-push-pipeline.png b/images/examples/deployments/heroku-vanilla-push-pipeline.png new file mode 100644 index 00000000..d3ef1209 Binary files /dev/null and b/images/examples/deployments/heroku-vanilla-push-pipeline.png differ diff --git a/images/examples/deployments/k8s-deployment-CD-pipeline.png b/images/examples/deployments/k8s-deployment-CD-pipeline.png new file mode 100644 index 00000000..551ff840 Binary files /dev/null and b/images/examples/deployments/k8s-deployment-CD-pipeline.png differ diff --git a/images/examples/deployments/k8s-deployment-ci-pipeline.png b/images/examples/deployments/k8s-deployment-ci-pipeline.png new file mode 100644 index 00000000..0e51832c Binary files /dev/null and b/images/examples/deployments/k8s-deployment-ci-pipeline.png differ diff --git a/images/examples/deployments/k8s-kustomize-dashboard.png b/images/examples/deployments/k8s-kustomize-dashboard.png new file mode 100644 index 00000000..2117ca0e Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-dashboard.png differ diff --git a/images/examples/deployments/k8s-kustomize-pipeline.png b/images/examples/deployments/k8s-kustomize-pipeline.png new file mode 100644 index 00000000..59ab7270 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-pipeline.png differ diff --git a/images/examples/deployments/k8s-kustomize-prod-endpoint.png b/images/examples/deployments/k8s-kustomize-prod-endpoint.png new file mode 100644 index 00000000..1dc4639b Binary files /dev/null and
b/images/examples/deployments/k8s-kustomize-prod-endpoint.png differ diff --git a/images/examples/deployments/k8s-kustomize-prod-pipeline.png b/images/examples/deployments/k8s-kustomize-prod-pipeline.png new file mode 100644 index 00000000..91ee13d0 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-prod-pipeline.png differ diff --git a/images/examples/deployments/k8s-kustomize-staging-endpoint.png b/images/examples/deployments/k8s-kustomize-staging-endpoint.png new file mode 100644 index 00000000..fb9d016b Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-staging-endpoint.png differ diff --git a/images/examples/deployments/k8s-kustomize-staging-pipeline.png b/images/examples/deployments/k8s-kustomize-staging-pipeline.png new file mode 100644 index 00000000..4984c624 Binary files /dev/null and b/images/examples/deployments/k8s-kustomize-staging-pipeline.png differ diff --git a/images/examples/deployments/scp-hello-world.png b/images/examples/deployments/scp-hello-world.png new file mode 100644 index 00000000..d375443d Binary files /dev/null and b/images/examples/deployments/scp-hello-world.png differ diff --git a/images/examples/deployments/scp-pipeline.png b/images/examples/deployments/scp-pipeline.png new file mode 100644 index 00000000..de281fb7 Binary files /dev/null and b/images/examples/deployments/scp-pipeline.png differ diff --git a/images/examples/deployments/scp-variables.png b/images/examples/deployments/scp-variables.png new file mode 100644 index 00000000..5d884180 Binary files /dev/null and b/images/examples/deployments/scp-variables.png differ diff --git a/images/examples/docker-build/auto-push-to-cfcr.png b/images/examples/docker-build/auto-push-to-cfcr.png new file mode 100644 index 00000000..6f0b1013 Binary files /dev/null and b/images/examples/docker-build/auto-push-to-cfcr.png differ diff --git a/images/examples/docker-build/build-and-push-pipeline.png b/images/examples/docker-build/build-and-push-pipeline.png new file mode 100644 index 00000000..9e4a943d Binary files /dev/null and b/images/examples/docker-build/build-and-push-pipeline.png differ diff --git a/images/examples/docker-build/build-dockerfile-root.png b/images/examples/docker-build/build-dockerfile-root.png new file mode 100644 index 00000000..d08ca037 Binary files /dev/null and b/images/examples/docker-build/build-dockerfile-root.png differ diff --git a/images/examples/docker-build/build-from-other-git-repo.png b/images/examples/docker-build/build-from-other-git-repo.png new file mode 100644 index 00000000..5c61a1e3 Binary files /dev/null and b/images/examples/docker-build/build-from-other-git-repo.png differ diff --git a/images/examples/docker-build/build-spefify-dockerfile.png b/images/examples/docker-build/build-spefify-dockerfile.png new file mode 100644 index 00000000..23e71e29 Binary files /dev/null and b/images/examples/docker-build/build-spefify-dockerfile.png differ diff --git a/images/examples/docker-build/cfcr-layers.png b/images/examples/docker-build/cfcr-layers.png new file mode 100644 index 00000000..ca67be1f Binary files /dev/null and b/images/examples/docker-build/cfcr-layers.png differ diff --git a/images/examples/docker-build/docker-build-arguments.png b/images/examples/docker-build/docker-build-arguments.png new file mode 100644 index 00000000..5907584a Binary files /dev/null and b/images/examples/docker-build/docker-build-arguments.png differ diff --git a/images/examples/docker-build/two-docker-images.png 
b/images/examples/docker-build/two-docker-images.png new file mode 100644 index 00000000..c7974d0d Binary files /dev/null and b/images/examples/docker-build/two-docker-images.png differ diff --git a/images/examples/docker-https/codefresh_nginx_container.png b/images/examples/docker-https/codefresh_nginx_container.png new file mode 100644 index 00000000..f2aea39d Binary files /dev/null and b/images/examples/docker-https/codefresh_nginx_container.png differ diff --git a/images/examples/docker-https/codefresh_webapp_container.png b/images/examples/docker-https/codefresh_webapp_container.png new file mode 100644 index 00000000..b56e30ae Binary files /dev/null and b/images/examples/docker-https/codefresh_webapp_container.png differ diff --git a/images/examples/docker-swarm/docker-swarm-pipeline.png b/images/examples/docker-swarm/docker-swarm-pipeline.png new file mode 100644 index 00000000..2fe7ed3c Binary files /dev/null and b/images/examples/docker-swarm/docker-swarm-pipeline.png differ diff --git a/images/examples/elastic-beanstalk/60d70d4-codefresh_eb_env_vars.png b/images/examples/elastic-beanstalk/60d70d4-codefresh_eb_env_vars.png new file mode 100644 index 00000000..2d481ef8 Binary files /dev/null and b/images/examples/elastic-beanstalk/60d70d4-codefresh_eb_env_vars.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png b/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png new file mode 100644 index 00000000..631cc70b Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_cf_step_deploy.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png b/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png new file mode 100644 index 00000000..2d481ef8 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_env_vars.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png b/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png new file mode 100644 index 00000000..f1f462b5 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_environment-deploy.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_environment.png b/images/examples/elastic-beanstalk/codefresh_eb_environment.png new file mode 100644 index 00000000..3b7f6fce Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_environment.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_health.png b/images/examples/elastic-beanstalk/codefresh_eb_health.png new file mode 100644 index 00000000..90e083b5 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_health.png differ diff --git a/images/examples/elastic-beanstalk/codefresh_eb_version_label.png b/images/examples/elastic-beanstalk/codefresh_eb_version_label.png new file mode 100644 index 00000000..62e33942 Binary files /dev/null and b/images/examples/elastic-beanstalk/codefresh_eb_version_label.png differ diff --git a/images/examples/git/sha-id-codefresh.png b/images/examples/git/sha-id-codefresh.png new file mode 100644 index 00000000..1b8b758d Binary files /dev/null and b/images/examples/git/sha-id-codefresh.png differ diff --git a/images/examples/git/sha-id-docker-hub.png b/images/examples/git/sha-id-docker-hub.png new file mode 100644 index 00000000..15bb4499 Binary files /dev/null and b/images/examples/git/sha-id-docker-hub.png differ diff --git a/images/examples/gs/gs-download-pipeline.png 
b/images/examples/gs/gs-download-pipeline.png new file mode 100644 index 00000000..cd704ccf Binary files /dev/null and b/images/examples/gs/gs-download-pipeline.png differ diff --git a/images/examples/gs/gs-pipeline-vars.png b/images/examples/gs/gs-pipeline-vars.png new file mode 100644 index 00000000..c356bc58 Binary files /dev/null and b/images/examples/gs/gs-pipeline-vars.png differ diff --git a/images/examples/gs/gs-upload-pipeline.png b/images/examples/gs/gs-upload-pipeline.png new file mode 100644 index 00000000..e6154ca4 Binary files /dev/null and b/images/examples/gs/gs-upload-pipeline.png differ diff --git a/images/examples/helm/helm-chart.png b/images/examples/helm/helm-chart.png new file mode 100644 index 00000000..54bfff9c Binary files /dev/null and b/images/examples/helm/helm-chart.png differ diff --git a/images/examples/helm/helm-deploy-pipeline.png b/images/examples/helm/helm-deploy-pipeline.png new file mode 100644 index 00000000..3cf18fa3 Binary files /dev/null and b/images/examples/helm/helm-deploy-pipeline.png differ diff --git a/images/examples/helm/helm-push-and-deploy-pipeline.png b/images/examples/helm/helm-push-and-deploy-pipeline.png new file mode 100644 index 00000000..284b6aa8 Binary files /dev/null and b/images/examples/helm/helm-push-and-deploy-pipeline.png differ diff --git a/images/examples/helm/helm-release.png b/images/examples/helm/helm-release.png new file mode 100644 index 00000000..aa25f473 Binary files /dev/null and b/images/examples/helm/helm-release.png differ diff --git a/images/examples/helm/import-helm-configuration.png b/images/examples/helm/import-helm-configuration.png new file mode 100644 index 00000000..538e04ab Binary files /dev/null and b/images/examples/helm/import-helm-configuration.png differ diff --git a/images/examples/integration-tests/integration-tests.png b/images/examples/integration-tests/integration-tests.png new file mode 100644 index 00000000..d1483324 Binary files /dev/null and b/images/examples/integration-tests/integration-tests.png differ diff --git a/images/examples/integration-tests/mongodb-integration-tests.png b/images/examples/integration-tests/mongodb-integration-tests.png new file mode 100644 index 00000000..78604436 Binary files /dev/null and b/images/examples/integration-tests/mongodb-integration-tests.png differ diff --git a/images/examples/integration-tests/mysql-integration-tests.png b/images/examples/integration-tests/mysql-integration-tests.png new file mode 100644 index 00000000..de46fed8 Binary files /dev/null and b/images/examples/integration-tests/mysql-integration-tests.png differ diff --git a/images/examples/integration-tests/postgresql-integration-tests.png b/images/examples/integration-tests/postgresql-integration-tests.png new file mode 100644 index 00000000..b661aecb Binary files /dev/null and b/images/examples/integration-tests/postgresql-integration-tests.png differ diff --git a/images/examples/integration-tests/preload-data-to-db.png b/images/examples/integration-tests/preload-data-to-db.png new file mode 100644 index 00000000..bc7762e3 Binary files /dev/null and b/images/examples/integration-tests/preload-data-to-db.png differ diff --git a/images/examples/integration-tests/redis-integration-tests.png b/images/examples/integration-tests/redis-integration-tests.png new file mode 100644 index 00000000..67b04e6e Binary files /dev/null and b/images/examples/integration-tests/redis-integration-tests.png differ diff --git a/images/examples/nested-pipelines/call-other-pipeline.png 
b/images/examples/nested-pipelines/call-other-pipeline.png new file mode 100644 index 00000000..dc170913 Binary files /dev/null and b/images/examples/nested-pipelines/call-other-pipeline.png differ diff --git a/images/examples/nomad/.keep b/images/examples/nomad/.keep new file mode 100644 index 00000000..e69de29b diff --git a/images/examples/nomad/nomad-ci-pipeline.png b/images/examples/nomad/nomad-ci-pipeline.png new file mode 100644 index 00000000..45f9c676 Binary files /dev/null and b/images/examples/nomad/nomad-ci-pipeline.png differ diff --git a/images/examples/nomad/nomad-ui-deployment.png b/images/examples/nomad/nomad-ui-deployment.png new file mode 100644 index 00000000..bf1b9736 Binary files /dev/null and b/images/examples/nomad/nomad-ui-deployment.png differ diff --git a/images/examples/nomad/nomad-variables.png b/images/examples/nomad/nomad-variables.png new file mode 100644 index 00000000..98de8aaa Binary files /dev/null and b/images/examples/nomad/nomad-variables.png differ diff --git a/images/examples/packer-gcloud/.keep b/images/examples/packer-gcloud/.keep new file mode 100644 index 00000000..e69de29b diff --git a/images/examples/packer-gcloud/packer-codefresh-pipeline.png b/images/examples/packer-gcloud/packer-codefresh-pipeline.png new file mode 100644 index 00000000..831361ea Binary files /dev/null and b/images/examples/packer-gcloud/packer-codefresh-pipeline.png differ diff --git a/images/examples/packer-gcloud/service-account-variable.png b/images/examples/packer-gcloud/service-account-variable.png new file mode 100644 index 00000000..6da5c0c3 Binary files /dev/null and b/images/examples/packer-gcloud/service-account-variable.png differ diff --git a/images/examples/packer-gcloud/web-app-url.png b/images/examples/packer-gcloud/web-app-url.png new file mode 100644 index 00000000..b5e35624 Binary files /dev/null and b/images/examples/packer-gcloud/web-app-url.png differ diff --git a/images/examples/php-file-transfer/pipeline.png b/images/examples/php-file-transfer/pipeline.png new file mode 100644 index 00000000..bc8e1d54 Binary files /dev/null and b/images/examples/php-file-transfer/pipeline.png differ diff --git a/images/examples/php-file-transfer/test-environment.png b/images/examples/php-file-transfer/test-environment.png new file mode 100644 index 00000000..53f6fe80 Binary files /dev/null and b/images/examples/php-file-transfer/test-environment.png differ diff --git a/images/examples/php-file-transfer/variables.png b/images/examples/php-file-transfer/variables.png new file mode 100644 index 00000000..12e59682 Binary files /dev/null and b/images/examples/php-file-transfer/variables.png differ diff --git a/images/examples/pulumi/pulumi-access-token.png b/images/examples/pulumi/pulumi-access-token.png new file mode 100644 index 00000000..b060c710 Binary files /dev/null and b/images/examples/pulumi/pulumi-access-token.png differ diff --git a/images/examples/pulumi/pulumi-pipeline.png b/images/examples/pulumi/pulumi-pipeline.png new file mode 100644 index 00000000..c685ecae Binary files /dev/null and b/images/examples/pulumi/pulumi-pipeline.png differ diff --git a/images/examples/scala/multi-stage-pipeline.png b/images/examples/scala/multi-stage-pipeline.png new file mode 100644 index 00000000..fccce93c Binary files /dev/null and b/images/examples/scala/multi-stage-pipeline.png differ diff --git a/images/examples/scala/pipeline.png b/images/examples/scala/pipeline.png new file mode 100644 index 00000000..0ad5f393 Binary files /dev/null and 
b/images/examples/scala/pipeline.png differ diff --git a/images/examples/scala/single-stage-pipeline.png b/images/examples/scala/single-stage-pipeline.png new file mode 100644 index 00000000..a6c34e5c Binary files /dev/null and b/images/examples/scala/single-stage-pipeline.png differ diff --git a/images/examples/sealed-secrets/add-app.png b/images/examples/sealed-secrets/add-app.png new file mode 100644 index 00000000..e90655be Binary files /dev/null and b/images/examples/sealed-secrets/add-app.png differ diff --git a/images/examples/sealed-secrets/app-secrets.png b/images/examples/sealed-secrets/app-secrets.png new file mode 100644 index 00000000..71f7c905 Binary files /dev/null and b/images/examples/sealed-secrets/app-secrets.png differ diff --git a/images/examples/sealed-secrets/current-state.png b/images/examples/sealed-secrets/current-state.png new file mode 100644 index 00000000..afab1258 Binary files /dev/null and b/images/examples/sealed-secrets/current-state.png differ diff --git a/images/examples/secrets/mozilla-sops-pipeline-vars.png b/images/examples/secrets/mozilla-sops-pipeline-vars.png new file mode 100644 index 00000000..fdbef99c Binary files /dev/null and b/images/examples/secrets/mozilla-sops-pipeline-vars.png differ diff --git a/images/examples/secrets/mozilla-sops-pipeline.png b/images/examples/secrets/mozilla-sops-pipeline.png new file mode 100644 index 00000000..977e6ac0 Binary files /dev/null and b/images/examples/secrets/mozilla-sops-pipeline.png differ diff --git a/images/examples/secrets/vault-pipeline.png b/images/examples/secrets/vault-pipeline.png new file mode 100644 index 00000000..ce4245ab Binary files /dev/null and b/images/examples/secrets/vault-pipeline.png differ diff --git a/images/examples/secrets/vault-pipeline2.png b/images/examples/secrets/vault-pipeline2.png new file mode 100644 index 00000000..3b53a97c Binary files /dev/null and b/images/examples/secrets/vault-pipeline2.png differ diff --git a/images/examples/shared-workspace/volume-list.png b/images/examples/shared-workspace/volume-list.png new file mode 100644 index 00000000..dd81b2c7 Binary files /dev/null and b/images/examples/shared-workspace/volume-list.png differ diff --git a/images/examples/terraform/google_cloud_json.png b/images/examples/terraform/google_cloud_json.png new file mode 100644 index 00000000..489c0da2 Binary files /dev/null and b/images/examples/terraform/google_cloud_json.png differ diff --git a/images/examples/terraform/terraform-pipeline.png b/images/examples/terraform/terraform-pipeline.png new file mode 100644 index 00000000..7fa28781 Binary files /dev/null and b/images/examples/terraform/terraform-pipeline.png differ diff --git a/images/examples/unit-tests/fan-in-fan-out-pipeline.png b/images/examples/unit-tests/fan-in-fan-out-pipeline.png new file mode 100644 index 00000000..f4e24eb6 Binary files /dev/null and b/images/examples/unit-tests/fan-in-fan-out-pipeline.png differ diff --git a/images/examples/unit-tests/parallel-pipeline-examples.png b/images/examples/unit-tests/parallel-pipeline-examples.png new file mode 100644 index 00000000..8b171545 Binary files /dev/null and b/images/examples/unit-tests/parallel-pipeline-examples.png differ diff --git a/images/examples/unit-tests/unit-tests-pipeline.png b/images/examples/unit-tests/unit-tests-pipeline.png new file mode 100644 index 00000000..8f21d296 Binary files /dev/null and b/images/examples/unit-tests/unit-tests-pipeline.png differ diff --git a/images/administration/installation/architecture-high-level.png 
b/images/installation/architecture-high-level.png similarity index 100% rename from images/administration/installation/architecture-high-level.png rename to images/installation/architecture-high-level.png diff --git a/images/administration/installation/codefresh-saas.png b/images/installation/codefresh-saas.png similarity index 100% rename from images/administration/installation/codefresh-saas.png rename to images/installation/codefresh-saas.png diff --git a/images/administration/installation/hybrid-installation.png b/images/installation/hybrid-installation.png similarity index 100% rename from images/administration/installation/hybrid-installation.png rename to images/installation/hybrid-installation.png diff --git a/images/installation/soc2-type2-certified.png b/images/installation/soc2-type2-certified.png new file mode 100644 index 00000000..b1e59f37 Binary files /dev/null and b/images/installation/soc2-type2-certified.png differ diff --git a/images/administration/installation/topology-new.png b/images/installation/topology-new.png similarity index 100% rename from images/administration/installation/topology-new.png rename to images/installation/topology-new.png diff --git a/images/administration/installation/topology.png b/images/installation/topology.png similarity index 100% rename from images/administration/installation/topology.png rename to images/installation/topology.png diff --git a/images/invite-users.png b/images/invite-users.png new file mode 100644 index 00000000..d29a06a4 Binary files /dev/null and b/images/invite-users.png differ diff --git a/images/pipeline/badges/view-public-logs.png b/images/pipeline/badges/view-public-logs.png index 395c4046..5589bd62 100644 Binary files a/images/pipeline/badges/view-public-logs.png and b/images/pipeline/badges/view-public-logs.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png b/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png new file mode 100644 index 00000000..90a0263c Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/edit-project-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png b/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png new file mode 100644 index 00000000..6ec70a77 Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/view-build-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png b/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png new file mode 100644 index 00000000..f821f1ed Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/view-pipeline-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png b/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png new file mode 100644 index 00000000..01ad8404 Binary files /dev/null and b/images/pipeline/codefresh-yaml/annotations/view-project-annotations.png differ diff --git a/images/pipeline/codefresh-yaml/approval/approval-rule.png b/images/pipeline/codefresh-yaml/approval/approval-rule.png new file mode 100644 index 00000000..b77e4bd2 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/approval-rule.png differ diff --git a/images/pipeline/codefresh-yaml/approval/approval-waiting.png b/images/pipeline/codefresh-yaml/approval/approval-waiting.png new file mode 100644 index 00000000..88dbd12a Binary files /dev/null and 
b/images/pipeline/codefresh-yaml/approval/approval-waiting.png differ diff --git a/images/pipeline/codefresh-yaml/approval/build-waiting.png b/images/pipeline/codefresh-yaml/approval/build-waiting.png new file mode 100644 index 00000000..e594f9b8 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/build-waiting.png differ diff --git a/images/pipeline/codefresh-yaml/approval/keep-volume.png b/images/pipeline/codefresh-yaml/approval/keep-volume.png new file mode 100644 index 00000000..d73b466a Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/keep-volume.png differ diff --git a/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png b/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png new file mode 100644 index 00000000..44acd761 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/pipeline-rejected.png differ diff --git a/images/pipeline/codefresh-yaml/approval/pipeline-tag.png b/images/pipeline/codefresh-yaml/approval/pipeline-tag.png new file mode 100644 index 00000000..6c5b7b7d Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/pipeline-tag.png differ diff --git a/images/pipeline/codefresh-yaml/approval/slack-approval.png b/images/pipeline/codefresh-yaml/approval/slack-approval.png new file mode 100644 index 00000000..a5c3c9a4 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/slack-approval.png differ diff --git a/images/pipeline/codefresh-yaml/approval/slack-settings.png b/images/pipeline/codefresh-yaml/approval/slack-settings.png new file mode 100644 index 00000000..0d88cd27 Binary files /dev/null and b/images/pipeline/codefresh-yaml/approval/slack-settings.png differ diff --git a/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png b/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png new file mode 100644 index 00000000..7fe8d4db Binary files /dev/null and b/images/pipeline/codefresh-yaml/docker-image-metadata/annotations.png differ diff --git a/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png b/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png new file mode 100644 index 00000000..f6d29822 Binary files /dev/null and b/images/pipeline/codefresh-yaml/docker-image-metadata/metadata.png differ diff --git a/images/pipeline/codefresh-yaml/environments/environments.png b/images/pipeline/codefresh-yaml/environments/environments.png new file mode 100644 index 00000000..2ad21c0d Binary files /dev/null and b/images/pipeline/codefresh-yaml/environments/environments.png differ diff --git a/images/pipeline/codefresh-yaml/environments/helm-environment.png b/images/pipeline/codefresh-yaml/environments/helm-environment.png new file mode 100644 index 00000000..0c314034 Binary files /dev/null and b/images/pipeline/codefresh-yaml/environments/helm-environment.png differ diff --git a/images/pipeline/codefresh-yaml/environments/k8s-environment.png b/images/pipeline/codefresh-yaml/environments/k8s-environment.png new file mode 100644 index 00000000..a556f9d2 Binary files /dev/null and b/images/pipeline/codefresh-yaml/environments/k8s-environment.png differ diff --git a/images/pipeline/codefresh-yaml/existing-composition.png b/images/pipeline/codefresh-yaml/existing-composition.png new file mode 100644 index 00000000..93b91ef6 Binary files /dev/null and b/images/pipeline/codefresh-yaml/existing-composition.png differ diff --git a/images/pipeline/codefresh-yaml/hooks/before-pipeline.png 
b/images/pipeline/codefresh-yaml/hooks/before-pipeline.png new file mode 100644 index 00000000..f259f530 Binary files /dev/null and b/images/pipeline/codefresh-yaml/hooks/before-pipeline.png differ diff --git a/images/pipeline/codefresh-yaml/hooks/cleanup-step.png b/images/pipeline/codefresh-yaml/hooks/cleanup-step.png new file mode 100644 index 00000000..13e9a18d Binary files /dev/null and b/images/pipeline/codefresh-yaml/hooks/cleanup-step.png differ diff --git a/images/pipeline/codefresh-yaml/hooks/step-after.png b/images/pipeline/codefresh-yaml/hooks/step-after.png new file mode 100644 index 00000000..e7b48297 Binary files /dev/null and b/images/pipeline/codefresh-yaml/hooks/step-after.png differ diff --git a/images/pipeline/codefresh-yaml/inline-editor.png b/images/pipeline/codefresh-yaml/inline-editor.png new file mode 100644 index 00000000..f7ae6c6f Binary files /dev/null and b/images/pipeline/codefresh-yaml/inline-editor.png differ diff --git a/images/pipeline/codefresh-yaml/parallel-push.png b/images/pipeline/codefresh-yaml/parallel-push.png new file mode 100644 index 00000000..2afa2d21 Binary files /dev/null and b/images/pipeline/codefresh-yaml/parallel-push.png differ diff --git a/images/pipeline/codefresh-yaml/redis-example.png b/images/pipeline/codefresh-yaml/redis-example.png new file mode 100644 index 00000000..ec621922 Binary files /dev/null and b/images/pipeline/codefresh-yaml/redis-example.png differ diff --git a/images/pipeline/codefresh-yaml/services/services-tab.png b/images/pipeline/codefresh-yaml/services/services-tab.png new file mode 100644 index 00000000..4e8aa081 Binary files /dev/null and b/images/pipeline/codefresh-yaml/services/services-tab.png differ diff --git a/images/pipeline/codefresh-yaml/stages/complex-pipeline.png b/images/pipeline/codefresh-yaml/stages/complex-pipeline.png new file mode 100644 index 00000000..862856df Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/complex-pipeline.png differ diff --git a/images/pipeline/codefresh-yaml/stages/complex.png b/images/pipeline/codefresh-yaml/stages/complex.png new file mode 100644 index 00000000..02ee476b Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/complex.png differ diff --git a/images/pipeline/codefresh-yaml/stages/example.png b/images/pipeline/codefresh-yaml/stages/example.png new file mode 100644 index 00000000..f08a9f74 Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/example.png differ diff --git a/images/pipeline/codefresh-yaml/stages/linear-view.png b/images/pipeline/codefresh-yaml/stages/linear-view.png new file mode 100644 index 00000000..829121af Binary files /dev/null and b/images/pipeline/codefresh-yaml/stages/linear-view.png differ diff --git a/images/pipeline/codefresh-yaml/steps/choose-step.png b/images/pipeline/codefresh-yaml/steps/choose-step.png new file mode 100644 index 00000000..f8a832af Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/choose-step.png differ diff --git a/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png b/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png new file mode 100644 index 00000000..fb3237f7 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/codefresh-registry-list.png differ diff --git a/images/pipeline/codefresh-yaml/steps/create-custom-step.png b/images/pipeline/codefresh-yaml/steps/create-custom-step.png new file mode 100644 index 00000000..884da4ef Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/create-custom-step.png 
differ diff --git a/images/pipeline/codefresh-yaml/steps/create-plugin-image.png b/images/pipeline/codefresh-yaml/steps/create-plugin-image.png new file mode 100644 index 00000000..49d3741a Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/create-plugin-image.png differ diff --git a/images/pipeline/codefresh-yaml/steps/example-git-providers.png b/images/pipeline/codefresh-yaml/steps/example-git-providers.png new file mode 100644 index 00000000..2362a217 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/example-git-providers.png differ diff --git a/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png b/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png new file mode 100644 index 00000000..c7389ab9 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/input-parameters-definition.png differ diff --git a/images/pipeline/codefresh-yaml/steps/input-parameters.png b/images/pipeline/codefresh-yaml/steps/input-parameters.png new file mode 100644 index 00000000..63c11583 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/input-parameters.png differ diff --git a/images/pipeline/codefresh-yaml/steps/multi-checkout.png b/images/pipeline/codefresh-yaml/steps/multi-checkout.png new file mode 100644 index 00000000..919f3f1a Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/multi-checkout.png differ diff --git a/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png b/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png new file mode 100644 index 00000000..6bc7e472 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/output-parameters-definition.png differ diff --git a/images/pipeline/codefresh-yaml/steps/plugin-parameters.png b/images/pipeline/codefresh-yaml/steps/plugin-parameters.png new file mode 100644 index 00000000..b273bfb8 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/plugin-parameters.png differ diff --git a/images/pipeline/codefresh-yaml/steps/plugin-usage.png b/images/pipeline/codefresh-yaml/steps/plugin-usage.png new file mode 100644 index 00000000..3f0a53e4 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/plugin-usage.png differ diff --git a/images/pipeline/codefresh-yaml/steps/proxy-variables.png b/images/pipeline/codefresh-yaml/steps/proxy-variables.png new file mode 100644 index 00000000..26725f54 Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/proxy-variables.png differ diff --git a/images/pipeline/codefresh-yaml/steps/step-versions.png b/images/pipeline/codefresh-yaml/steps/step-versions.png new file mode 100644 index 00000000..1976dc0c Binary files /dev/null and b/images/pipeline/codefresh-yaml/steps/step-versions.png differ diff --git a/images/pipeline/codefresh-yaml/variables/encrypted-variables.png b/images/pipeline/codefresh-yaml/variables/encrypted-variables.png new file mode 100644 index 00000000..68f82a42 Binary files /dev/null and b/images/pipeline/codefresh-yaml/variables/encrypted-variables.png differ diff --git a/images/pipeline/codefresh-yaml/variables/masked-variables.png b/images/pipeline/codefresh-yaml/variables/masked-variables.png new file mode 100644 index 00000000..d097f4f2 Binary files /dev/null and b/images/pipeline/codefresh-yaml/variables/masked-variables.png differ diff --git a/images/pipeline/create/add-pipeline-to-project.png b/images/pipeline/create/add-pipeline-to-project.png new file mode 100644 index 00000000..41fd8dd8 Binary files /dev/null and 
b/images/pipeline/create/add-pipeline-to-project.png differ diff --git a/images/pipeline/create/create-template-menu.png b/images/pipeline/create/create-template-menu.png new file mode 100644 index 00000000..1259fa3e Binary files /dev/null and b/images/pipeline/create/create-template-menu.png differ diff --git a/images/pipeline/create/custom-yml.png b/images/pipeline/create/custom-yml.png new file mode 100644 index 00000000..925d9ad8 Binary files /dev/null and b/images/pipeline/create/custom-yml.png differ diff --git a/images/pipeline/create/editor.png b/images/pipeline/create/editor.png new file mode 100644 index 00000000..cea04bef Binary files /dev/null and b/images/pipeline/create/editor.png differ diff --git a/images/pipeline/create/external-resources.png b/images/pipeline/create/external-resources.png new file mode 100644 index 00000000..559439dc Binary files /dev/null and b/images/pipeline/create/external-resources.png differ diff --git a/images/pipeline/create/inline-editor.png b/images/pipeline/create/inline-editor.png new file mode 100644 index 00000000..94c82166 Binary files /dev/null and b/images/pipeline/create/inline-editor.png differ diff --git a/images/pipeline/create/pipeline-from-internal-repo.png b/images/pipeline/create/pipeline-from-internal-repo.png new file mode 100644 index 00000000..29d47914 Binary files /dev/null and b/images/pipeline/create/pipeline-from-internal-repo.png differ diff --git a/images/pipeline/create/pipelines-from-repository.png b/images/pipeline/create/pipelines-from-repository.png new file mode 100644 index 00000000..b1205e53 Binary files /dev/null and b/images/pipeline/create/pipelines-from-repository.png differ diff --git a/images/pipeline/create/pipelines-no-repository.png b/images/pipeline/create/pipelines-no-repository.png new file mode 100644 index 00000000..7b9c7e39 Binary files /dev/null and b/images/pipeline/create/pipelines-no-repository.png differ diff --git a/images/pipeline/create/predefined-steps.png b/images/pipeline/create/predefined-steps.png new file mode 100644 index 00000000..15a6939d Binary files /dev/null and b/images/pipeline/create/predefined-steps.png differ diff --git a/images/pipeline/create/set-build-disk-space.png b/images/pipeline/create/set-build-disk-space.png new file mode 100644 index 00000000..374d2cd0 Binary files /dev/null and b/images/pipeline/create/set-build-disk-space.png differ diff --git a/images/pipeline/create/template-tag.png b/images/pipeline/create/template-tag.png index bd950367..17313a44 100644 Binary files a/images/pipeline/create/template-tag.png and b/images/pipeline/create/template-tag.png differ diff --git a/images/pipeline/docker-image/example2.png b/images/pipeline/docker-image/example2.png new file mode 100644 index 00000000..954f75b1 Binary files /dev/null and b/images/pipeline/docker-image/example2.png differ diff --git a/images/pipeline/docker-image/quality-image-annotation.png b/images/pipeline/docker-image/quality-image-annotation.png new file mode 100644 index 00000000..395b8335 Binary files /dev/null and b/images/pipeline/docker-image/quality-image-annotation.png differ diff --git a/images/pipeline/monitoring/build-variables-list.png b/images/pipeline/monitoring/build-variables-list.png new file mode 100644 index 00000000..2b8d45fa Binary files /dev/null and b/images/pipeline/monitoring/build-variables-list.png differ diff --git a/images/pipeline/monitoring/build-variables-view-option.png b/images/pipeline/monitoring/build-variables-view-option.png new file mode 100644 index 
00000000..41398f6f Binary files /dev/null and b/images/pipeline/monitoring/build-variables-view-option.png differ diff --git a/images/pipeline/monitoring/child-parent-build-info.png b/images/pipeline/monitoring/child-parent-build-info.png new file mode 100644 index 00000000..421318b0 Binary files /dev/null and b/images/pipeline/monitoring/child-parent-build-info.png differ diff --git a/images/pipeline/monitoring/icon-child-build.png b/images/pipeline/monitoring/icon-child-build.png new file mode 100644 index 00000000..6973f9af Binary files /dev/null and b/images/pipeline/monitoring/icon-child-build.png differ diff --git a/images/pipeline/monitoring/pipeline-view.png b/images/pipeline/monitoring/pipeline-view.png index ae7acacb..90422f9d 100644 Binary files a/images/pipeline/monitoring/pipeline-view.png and b/images/pipeline/monitoring/pipeline-view.png differ diff --git a/images/pipeline/monitoring/step-status-approved.png b/images/pipeline/monitoring/step-status-approved.png new file mode 100644 index 00000000..a6b1048d Binary files /dev/null and b/images/pipeline/monitoring/step-status-approved.png differ diff --git a/images/pipeline/monitoring/step-status-denied.png b/images/pipeline/monitoring/step-status-denied.png new file mode 100644 index 00000000..56b962ce Binary files /dev/null and b/images/pipeline/monitoring/step-status-denied.png differ diff --git a/images/pipeline/monitoring/step-status-error.png b/images/pipeline/monitoring/step-status-error.png new file mode 100644 index 00000000..fe591e40 Binary files /dev/null and b/images/pipeline/monitoring/step-status-error.png differ diff --git a/images/pipeline/monitoring/step-status-running-debug.png b/images/pipeline/monitoring/step-status-running-debug.png new file mode 100644 index 00000000..8175de43 Binary files /dev/null and b/images/pipeline/monitoring/step-status-running-debug.png differ diff --git a/images/pipeline/monitoring/step-status-running.png b/images/pipeline/monitoring/step-status-running.png new file mode 100644 index 00000000..0761f121 Binary files /dev/null and b/images/pipeline/monitoring/step-status-running.png differ diff --git a/images/pipeline/monitoring/step-status-success.gif b/images/pipeline/monitoring/step-status-success.gif new file mode 100644 index 00000000..53cb7ace Binary files /dev/null and b/images/pipeline/monitoring/step-status-success.gif differ diff --git a/images/pipeline/monitoring/step-status-success.png b/images/pipeline/monitoring/step-status-success.png new file mode 100644 index 00000000..6303d1b1 Binary files /dev/null and b/images/pipeline/monitoring/step-status-success.png differ diff --git a/images/pipeline/monitoring/step-status-terminated.png b/images/pipeline/monitoring/step-status-terminated.png new file mode 100644 index 00000000..c30bc449 Binary files /dev/null and b/images/pipeline/monitoring/step-status-terminated.png differ diff --git a/images/pipeline/monitoring/step-status-terminating.png b/images/pipeline/monitoring/step-status-terminating.png new file mode 100644 index 00000000..f38ebcdf Binary files /dev/null and b/images/pipeline/monitoring/step-status-terminating.png differ diff --git a/images/pipeline/pipeline-settings/pause-pipeline-enabled.png b/images/pipeline/pipeline-settings/pause-pipeline-enabled.png new file mode 100644 index 00000000..8c7c4305 Binary files /dev/null and b/images/pipeline/pipeline-settings/pause-pipeline-enabled.png differ diff --git a/images/pipeline/pipeline-settings/pipeline-settings-ui.png 
b/images/pipeline/pipeline-settings/pipeline-settings-ui.png new file mode 100644 index 00000000..105544c9 Binary files /dev/null and b/images/pipeline/pipeline-settings/pipeline-settings-ui.png differ diff --git a/images/pipeline/triggers/pr-comment-trigger-options.png b/images/pipeline/triggers/pr-comment-trigger-options.png new file mode 100644 index 00000000..854666ed Binary files /dev/null and b/images/pipeline/triggers/pr-comment-trigger-options.png differ diff --git a/images/sso/azure/1-azure-service.png b/images/sso/azure/1-azure-service.png deleted file mode 100644 index e24b0db1..00000000 Binary files a/images/sso/azure/1-azure-service.png and /dev/null differ diff --git a/images/sso/azure/10-Add-client-secret.png b/images/sso/azure/10-Add-client-secret.png deleted file mode 100644 index 9eb92944..00000000 Binary files a/images/sso/azure/10-Add-client-secret.png and /dev/null differ diff --git a/images/sso/azure/11-Set-reply-url.png b/images/sso/azure/11-Set-reply-url.png deleted file mode 100644 index 35864802..00000000 Binary files a/images/sso/azure/11-Set-reply-url.png and /dev/null differ diff --git a/images/sso/azure/12-set-reply-URL.png b/images/sso/azure/12-set-reply-URL.png deleted file mode 100644 index 22b82819..00000000 Binary files a/images/sso/azure/12-set-reply-URL.png and /dev/null differ diff --git a/images/sso/azure/13-Enable-ID-Tokens.png b/images/sso/azure/13-Enable-ID-Tokens.png deleted file mode 100644 index d76d48d7..00000000 Binary files a/images/sso/azure/13-Enable-ID-Tokens.png and /dev/null differ diff --git a/images/sso/azure/2-app-registrations.png b/images/sso/azure/2-app-registrations.png deleted file mode 100644 index e9e79309..00000000 Binary files a/images/sso/azure/2-app-registrations.png and /dev/null differ diff --git a/images/sso/azure/3-register-an-app.png b/images/sso/azure/3-register-an-app.png deleted file mode 100644 index 3a3d4c15..00000000 Binary files a/images/sso/azure/3-register-an-app.png and /dev/null differ diff --git a/images/sso/azure/4-created-app.png b/images/sso/azure/4-created-app.png deleted file mode 100644 index 3a1c8a96..00000000 Binary files a/images/sso/azure/4-created-app.png and /dev/null differ diff --git a/images/sso/azure/5-api-permissions.png b/images/sso/azure/5-api-permissions.png deleted file mode 100644 index b64467ad..00000000 Binary files a/images/sso/azure/5-api-permissions.png and /dev/null differ diff --git a/images/sso/azure/6-request-api-permissions.png b/images/sso/azure/6-request-api-permissions.png deleted file mode 100644 index b344dd24..00000000 Binary files a/images/sso/azure/6-request-api-permissions.png and /dev/null differ diff --git a/images/sso/azure/7-request-api-permissions.png b/images/sso/azure/7-request-api-permissions.png deleted file mode 100644 index 94d41031..00000000 Binary files a/images/sso/azure/7-request-api-permissions.png and /dev/null differ diff --git a/images/sso/azure/8-Enabled-permissions.png b/images/sso/azure/8-Enabled-permissions.png deleted file mode 100644 index 212c0aca..00000000 Binary files a/images/sso/azure/8-Enabled-permissions.png and /dev/null differ diff --git a/images/sso/azure/9-Create-secret-page.png b/images/sso/azure/9-Create-secret-page.png deleted file mode 100644 index 040bcb4b..00000000 Binary files a/images/sso/azure/9-Create-secret-page.png and /dev/null differ diff --git a/images/sso/azure/azure-properties-object-app-ids.png b/images/sso/azure/azure-properties-object-app-ids.png new file mode 100644 index 00000000..fc81be46 Binary files 
/dev/null and b/images/sso/azure/azure-properties-object-app-ids.png differ diff --git a/images/sso/azure/azure-step01.png b/images/sso/azure/azure-step01.png deleted file mode 100644 index c5b2169a..00000000 Binary files a/images/sso/azure/azure-step01.png and /dev/null differ diff --git a/images/sso/azure/azure-step02.png b/images/sso/azure/azure-step02.png deleted file mode 100644 index d1a3375a..00000000 Binary files a/images/sso/azure/azure-step02.png and /dev/null differ diff --git a/images/sso/azure/azure-step03.png b/images/sso/azure/azure-step03.png deleted file mode 100644 index ea122618..00000000 Binary files a/images/sso/azure/azure-step03.png and /dev/null differ diff --git a/images/sso/azure/azure-step04.png b/images/sso/azure/azure-step04.png deleted file mode 100644 index ee1506cb..00000000 Binary files a/images/sso/azure/azure-step04.png and /dev/null differ diff --git a/images/sso/azure/azure-step05.png b/images/sso/azure/azure-step05.png deleted file mode 100644 index 8968ec58..00000000 Binary files a/images/sso/azure/azure-step05.png and /dev/null differ diff --git a/images/sso/azure/azure-step06.png b/images/sso/azure/azure-step06.png deleted file mode 100644 index 9121527a..00000000 Binary files a/images/sso/azure/azure-step06.png and /dev/null differ diff --git a/images/sso/azure/azure-step07.png b/images/sso/azure/azure-step07.png deleted file mode 100644 index efdd321f..00000000 Binary files a/images/sso/azure/azure-step07.png and /dev/null differ diff --git a/images/sso/azure/azure-step08.png b/images/sso/azure/azure-step08.png deleted file mode 100644 index f0687e65..00000000 Binary files a/images/sso/azure/azure-step08.png and /dev/null differ diff --git a/images/sso/azure/azure-step09.png b/images/sso/azure/azure-step09.png deleted file mode 100644 index 4ebef717..00000000 Binary files a/images/sso/azure/azure-step09.png and /dev/null differ diff --git a/images/sso/azure/azure-step10.png b/images/sso/azure/azure-step10.png deleted file mode 100644 index 64e47407..00000000 Binary files a/images/sso/azure/azure-step10.png and /dev/null differ diff --git a/images/sso/azure/azure-step5.png b/images/sso/azure/azure-step5.png deleted file mode 100644 index 050c60f0..00000000 Binary files a/images/sso/azure/azure-step5.png and /dev/null differ diff --git a/images/sso/azure/client-secret-add-description.png b/images/sso/azure/client-secret-add-description.png new file mode 100644 index 00000000..c16e0ef9 Binary files /dev/null and b/images/sso/azure/client-secret-add-description.png differ diff --git a/images/sso/azure/client-secret-select-option.png b/images/sso/azure/client-secret-select-option.png new file mode 100644 index 00000000..e54b6dd4 Binary files /dev/null and b/images/sso/azure/client-secret-select-option.png differ diff --git a/images/sso/azure/config-app-permissions-added.png b/images/sso/azure/config-app-permissions-added.png new file mode 100644 index 00000000..044deb12 Binary files /dev/null and b/images/sso/azure/config-app-permissions-added.png differ diff --git a/images/sso/azure/config-app-permissions-grant-admin-consent.png b/images/sso/azure/config-app-permissions-grant-admin-consent.png new file mode 100644 index 00000000..bb2c7e95 Binary files /dev/null and b/images/sso/azure/config-app-permissions-grant-admin-consent.png differ diff --git a/images/sso/azure/config-app-permissions-microsoft-graph.png b/images/sso/azure/config-app-permissions-microsoft-graph.png new file mode 100644 index 00000000..15bd1821 Binary files /dev/null and 
b/images/sso/azure/config-app-permissions-microsoft-graph.png differ diff --git a/images/sso/azure/config-app-permissions-selected.png b/images/sso/azure/config-app-permissions-selected.png new file mode 100644 index 00000000..c020f24b Binary files /dev/null and b/images/sso/azure/config-app-permissions-selected.png differ diff --git a/images/sso/azure/redirect-rui-define-select-id-tokens.png b/images/sso/azure/redirect-rui-define-select-id-tokens.png new file mode 100644 index 00000000..0e8985d6 Binary files /dev/null and b/images/sso/azure/redirect-rui-define-select-id-tokens.png differ diff --git a/images/sso/azure/redirect-uri-web-configure.png b/images/sso/azure/redirect-uri-web-configure.png new file mode 100644 index 00000000..05f27e91 Binary files /dev/null and b/images/sso/azure/redirect-uri-web-configure.png differ diff --git a/images/sso/azure/register-app-name.png b/images/sso/azure/register-app-name.png new file mode 100644 index 00000000..aa4e542a Binary files /dev/null and b/images/sso/azure/register-app-name.png differ diff --git a/images/sso/azure/register-app-select-azure-ad.png b/images/sso/azure/register-app-select-azure-ad.png new file mode 100644 index 00000000..d2efcfb0 Binary files /dev/null and b/images/sso/azure/register-app-select-azure-ad.png differ diff --git a/images/sso/azure/sso-codefresh-generated-client-id.png b/images/sso/azure/sso-codefresh-generated-client-id.png new file mode 100644 index 00000000..86cb716e Binary files /dev/null and b/images/sso/azure/sso-codefresh-generated-client-id.png differ diff --git a/images/sso/azure/sso-codefresh-settings.png b/images/sso/azure/sso-codefresh-settings.png new file mode 100644 index 00000000..eabe5f6d Binary files /dev/null and b/images/sso/azure/sso-codefresh-settings.png differ